Dataset schema (one record = five fields, in this order):

  code                      string   length 82 to 54.1k
  code_codestyle            int64    0 to 699
  style_context             string   length 111 to 35.6k
  style_context_codestyle   int64    0 to 699
  label                     int64    0 to 1
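For orientation, here is a minimal sketch of how records with this schema could be loaded and inspected with the `datasets` library. The JSON Lines file name is a hypothetical placeholder; the actual source of this dump is not given.

from datasets import load_dataset

# "codestyle_records.jsonl" is a hypothetical placeholder path.
ds = load_dataset("json", data_files="codestyle_records.jsonl", split="train")

record = ds[0]
print(len(record["code"]))                # source string, 82 to 54.1k characters
print(record["code_codestyle"])           # style id for `code`, 0 to 699
print(len(record["style_context"]))       # context string, 111 to 35.6k characters
print(record["style_context_codestyle"])  # style id for `style_context`, 0 to 699
print(record["label"])                    # binary label, 0 or 1

The records below follow the field order of the schema above.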
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel SCREAMING_SNAKE_CASE__ : Tuple = { """gwf-440k""": { """url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""", """sample_rate""": 4_80_00, """sample_size""": 6_55_36, }, """jmann-small-190k""": { """url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""", """sample_rate""": 4_80_00, """sample_size""": 6_55_36, }, """jmann-large-580k""": { """url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""", """sample_rate""": 4_80_00, """sample_size""": 13_10_72, }, """maestro-uncond-150k""": { """url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""", """sample_rate""": 1_60_00, """sample_size""": 6_55_36, }, """unlocked-uncond-250k""": { """url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""", """sample_rate""": 1_60_00, """sample_size""": 6_55_36, }, """honk-140k""": { """url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""", """sample_rate""": 1_60_00, """sample_size""": 6_55_36, }, } def __lowercase ( snake_case, snake_case ): """simple docstring""" return torch.atana(snake_case, snake_case ) / math.pi * 2 def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ :Optional[int] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(snake_case, snake_case ) class lowerCamelCase_ ( lowerCamelCase ): pass class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase ): """simple docstring""" super().__init__() __magic_name__ :List[str] = DiffusionAttnUnetaD(__lowerCAmelCase , n_attn_layers=4 ) __magic_name__ :Any = deepcopy(self.diffusion ) __magic_name__ :Tuple = torch.quasirandom.SobolEngine(1 , scramble=__lowerCAmelCase ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[Any] = MODELS_MAP[model_name]['''url'''] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' SCREAMING_SNAKE_CASE__ : Optional[int] = { """1""": """resnets.0""", """2""": """attentions.0""", """3""": """resnets.1""", """4""": """attentions.1""", """5""": """resnets.2""", """6""": """attentions.2""", } SCREAMING_SNAKE_CASE__ : List[str] = { """8""": """resnets.0""", """9""": """attentions.0""", """10""": """resnets.1""", """11""": """attentions.1""", """12""": """resnets.2""", """13""": """attentions.2""", } SCREAMING_SNAKE_CASE__ : int = { """1""": """resnets.0""", """2""": """attentions.0""", """3""": """resnets.1""", """4""": """attentions.1""", """5""": """resnets.2""", """6""": """attentions.2""", """8""": """resnets.3""", """9""": """attentions.3""", """10""": """resnets.4""", """11""": """attentions.4""", """12""": """resnets.5""", """13""": """attentions.5""", } SCREAMING_SNAKE_CASE__ : List[str] = { """0""": """resnets.0""", """1""": """resnets.1""", """2""": """resnets.2""", """4""": """resnets.0""", """5""": """resnets.1""", """6""": """resnets.2""", } SCREAMING_SNAKE_CASE__ : str = { """skip""": """conv_skip""", """main.0""": """conv_1""", """main.1""": """group_norm_1""", """main.3""": """conv_2""", """main.4""": """group_norm_2""", } SCREAMING_SNAKE_CASE__ : Optional[Any] = { """norm""": """group_norm""", """qkv_proj""": ["""query""", """key""", """value"""], """out_proj""": ["""proj_attn"""], } def __lowercase ( 
snake_case ): """simple docstring""" if name.startswith('''skip''' ): return name.replace('''skip''', RES_CONV_MAP['''skip'''] ) # name has to be of format main.{digit} if not name.startswith('''main.''' ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def __lowercase ( snake_case ): """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(snake_case ) and not isinstance(snake_case, snake_case ): return name.replace(snake_case, snake_case ) elif name.startswith(snake_case ): return [name.replace(snake_case, snake_case ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def __lowercase ( snake_case, snake_case=1_3 ): """simple docstring""" __magic_name__ :List[Any] = input_string if string.split('''.''' )[0] == "timestep_embed": return string.replace('''timestep_embed''', '''time_proj''' ) __magic_name__ :Dict = 0 if string.startswith('''net.3.''' ): depth += 1 __magic_name__ :int = string[6:] elif string.startswith('''net.''' ): __magic_name__ :str = string[4:] while string.startswith('''main.7.''' ): depth += 1 __magic_name__ :List[Any] = string[7:] if string.startswith('''main.''' ): __magic_name__ :Optional[int] = string[5:] # mid block if string[:2].isdigit(): __magic_name__ :Optional[Any] = string[:2] __magic_name__ :int = string[2:] else: __magic_name__ :Dict = string[0] __magic_name__ :Dict = string[1:] if depth == max_depth: __magic_name__ :Optional[int] = MID_NUM_TO_LAYER[layer_num] __magic_name__ :Tuple = '''mid_block''' elif depth > 0 and int(snake_case ) < 7: __magic_name__ :List[Any] = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ :Optional[Any] = f'''down_blocks.{depth}''' elif depth > 0 and int(snake_case ) > 7: __magic_name__ :int = UP_NUM_TO_LAYER[layer_num] __magic_name__ :Union[str, Any] = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ :Dict = DEPTH_0_TO_LAYER[layer_num] __magic_name__ :Any = f'''up_blocks.{max_depth - 1}''' if int(snake_case ) > 3 else '''down_blocks.0''' if not string_left.startswith('''.''' ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ :str = string_left[1:] if "resnets" in new_layer: __magic_name__ :Optional[Any] = convert_resconv_naming(snake_case ) elif "attentions" in new_layer: __magic_name__ :List[Any] = convert_attn_naming(snake_case ) __magic_name__ :Optional[Any] = new_string_left if not isinstance(snake_case, snake_case ): __magic_name__ :List[Any] = prefix + '''.''' + new_layer + '''.''' + string_left else: __magic_name__ :str = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left] return new_string def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = {} for k, v in state_dict.items(): if k.endswith('''kernel''' ): # up- and downsample layers, don't have trainable weights continue __magic_name__ :Tuple = rename(snake_case ) # check if we need to transform from Conv => Linear for attention if isinstance(snake_case, snake_case ): __magic_name__ :Any = transform_conv_attns(snake_case, snake_case, snake_case ) else: __magic_name__ :List[Any] = v return new_state_dict def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" if len(snake_case ) == 1: if len(v.shape ) == 3: # weight __magic_name__ :int = v[:, :, 0] else: # bias __magic_name__ :Union[str, Any] = v else: # qkv matrices __magic_name__ :Tuple = v.shape[0] __magic_name__ :Optional[Any] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) 
== 3: __magic_name__ :str = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ :Any = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) __magic_name__ :Optional[int] = args.model_path.split('''/''' )[-1].split('''.''' )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ :Optional[Any] = download(snake_case ) __magic_name__ :int = MODELS_MAP[model_name]['''sample_rate'''] __magic_name__ :Optional[int] = MODELS_MAP[model_name]['''sample_size'''] __magic_name__ :List[Any] = Object() __magic_name__ :Any = sample_size __magic_name__ :str = sample_rate __magic_name__ :str = 0 __magic_name__ :Tuple = UNetaDModel(sample_size=snake_case, sample_rate=snake_case ) __magic_name__ :Any = diffusers_model.state_dict() __magic_name__ :Dict = DiffusionUncond(snake_case ) orig_model.load_state_dict(torch.load(args.model_path, map_location=snake_case )['''state_dict'''] ) __magic_name__ :List[Any] = orig_model.diffusion_ema.eval() __magic_name__ :Union[str, Any] = orig_model.state_dict() __magic_name__ :Tuple = rename_orig_weights(snake_case ) __magic_name__ :int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ :List[str] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(snake_case ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith('''kernel''' ) for k in list(snake_case ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __magic_name__ :Dict = value.squeeze() __magic_name__ :Optional[int] = value diffusers_model.load_state_dict(snake_case ) __magic_name__ :Tuple = 1_0_0 __magic_name__ :List[Any] = 3_3 __magic_name__ :Union[str, Any] = IPNDMScheduler(num_train_timesteps=snake_case ) __magic_name__ :Any = torch.manual_seed(snake_case ) __magic_name__ :List[str] = torch.randn([1, 2, config.sample_size], generator=snake_case ).to(snake_case ) __magic_name__ :Any = torch.linspace(1, 0, steps + 1, device=snake_case )[:-1] __magic_name__ :Optional[Any] = get_crash_schedule(snake_case ) __magic_name__ :int = DanceDiffusionPipeline(unet=snake_case, scheduler=snake_case ) __magic_name__ :Any = torch.manual_seed(3_3 ) __magic_name__ :int = pipe(num_inference_steps=snake_case, generator=snake_case ).audios __magic_name__ :Optional[Any] = sampling.iplms_sample(snake_case, snake_case, snake_case, {} ) __magic_name__ :Tuple = generated.clamp(-1, 1 ) __magic_name__ :Tuple = (generated - audio).abs().sum() __magic_name__ :Optional[Any] = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('''Diff sum''', snake_case ) print('''Diff max''', snake_case ) assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") SCREAMING_SNAKE_CASE__ : str = parser.parse_args() main(args)
code_codestyle: 0
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
style_context_codestyle: 682
label: 0
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint __snake_case = { '''169M''': 1_2, '''430M''': 2_4, '''1B5''': 2_4, '''3B''': 3_2, '''7B''': 3_2, '''14B''': 4_0, } __snake_case = { '''169M''': 7_6_8, '''430M''': 1_0_2_4, '''1B5''': 2_0_4_8, '''3B''': 2_5_6_0, '''7B''': 4_0_9_6, '''14B''': 5_1_2_0, } def _A ( _lowercase ) -> List[str]: """simple docstring""" __UpperCamelCase = list(state_dict.keys() ) for name in state_dict_keys: __UpperCamelCase = state_dict.pop(_lowercase ) # emb -> embedding if name.startswith('emb.' ): __UpperCamelCase = name.replace('emb.' , 'embeddings.' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('blocks.0.ln0' ): __UpperCamelCase = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' ) # att -> attention __UpperCamelCase = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _lowercase ) # ffn -> feed_forward __UpperCamelCase = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _lowercase ) # time_mix_k -> time_mix_key and reshape if name.endswith('.time_mix_k' ): __UpperCamelCase = name.replace('.time_mix_k' , '.time_mix_key' ) # time_mix_v -> time_mix_value and reshape if name.endswith('.time_mix_v' ): __UpperCamelCase = name.replace('.time_mix_v' , '.time_mix_value' ) # time_mix_r -> time_mix_key and reshape if name.endswith('.time_mix_r' ): __UpperCamelCase = name.replace('.time_mix_r' , '.time_mix_receptance' ) if name != "head.weight": __UpperCamelCase = 'rwkv.' + name __UpperCamelCase = weight return state_dict def _A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=None ) -> Any: """simple docstring""" if tokenizer_file is None: print('No `--tokenizer_file` provided, we will use the default tokenizer.' ) __UpperCamelCase = 5_02_77 __UpperCamelCase = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' ) else: __UpperCamelCase = PreTrainedTokenizerFast(tokenizer_file=_lowercase ) __UpperCamelCase = len(_lowercase ) tokenizer.save_pretrained(_lowercase ) # 2. Build the config __UpperCamelCase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: __UpperCamelCase = candidate break if size is None: raise ValueError('Could not infer the size, please provide it with the `--size` argument.' ) if size not in possible_sizes: raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' ) __UpperCamelCase = RwkvConfig( vocab_size=_lowercase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(_lowercase ) # 3. Download model file then convert state_dict __UpperCamelCase = hf_hub_download(_lowercase , _lowercase ) __UpperCamelCase = torch.load(_lowercase , map_location='cpu' ) __UpperCamelCase = convert_state_dict(_lowercase ) # 4. 
Split in shards and save __UpperCamelCase, __UpperCamelCase = shard_checkpoint(_lowercase ) for shard_file, shard in shards.items(): torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) ) if index is not None: __UpperCamelCase = os.path.join(_lowercase , _lowercase ) # Save the index as well with open(_lowercase , 'w' , encoding='utf-8' ) as f: __UpperCamelCase = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '\n' f.write(_lowercase ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( 'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' ) __UpperCamelCase = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: __UpperCamelCase = torch.load(os.path.join(_lowercase , _lowercase ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowercase , _lowercase ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('Please provide a `model_name` to push the model to the Hub.' ) __UpperCamelCase = AutoModelForCausalLM.from_pretrained(_lowercase ) model.push_to_hub(_lowercase , max_shard_size='2GB' ) tokenizer.push_to_hub(_lowercase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push to the Hub the converted model.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) __snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
code_codestyle: 1
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
style_context_codestyle: 682
label: 0
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

# Sieve out composites below NUM_PRIMES.
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    # Recursively build the set of distinct prime products whose factors
    # sum to `number_to_partition`; each product encodes one partition.
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    # Return the first value expressible as a sum of primes in more than
    # `number_unique_partitions` unique ways.
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 2
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
style_context_codestyle: 682
label: 0
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging lowerCAmelCase : int = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( snake_case_): lowerCAmelCase_ = ["""input_features""", """attention_mask"""] def __init__( self , A_=80 , A_=16000 , A_=0.0 , A_=10 , A_=25 , A_="hamming_window" , A_=32_768.0 , A_=0.97 , A_=1.0 , A_=True , A_=True , A_=False , **A_ , )-> List[str]: '''simple docstring''' super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ ) UpperCamelCase = feature_size UpperCamelCase = sampling_rate UpperCamelCase = padding_value UpperCamelCase = hop_length UpperCamelCase = win_length UpperCamelCase = frame_signal_scale UpperCamelCase = preemphasis_coeff UpperCamelCase = mel_floor UpperCamelCase = normalize_means UpperCamelCase = normalize_vars UpperCamelCase = win_function UpperCamelCase = return_attention_mask UpperCamelCase = win_length * sampling_rate // 1000 UpperCamelCase = hop_length * sampling_rate // 1000 UpperCamelCase = optimal_fft_length(self.sample_size ) UpperCamelCase = (self.n_fft // 2) + 1 def UpperCAmelCase_ ( self , A_ )-> np.ndarray: '''simple docstring''' if self.win_function == "hamming_window": UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=A_ ) else: UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function ) UpperCamelCase = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) UpperCamelCase = spectrogram( one_waveform * self.frame_signal_scale , window=A_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=A_ , preemphasis=self.preemphasis_coeff , mel_filters=A_ , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Dict: '''simple docstring''' if self.normalize_means: UpperCamelCase = x[:input_length].mean(axis=0 ) UpperCamelCase = np.subtract(A_ , A_ ) if self.normalize_vars: UpperCamelCase = x[:input_length].std(axis=0 ) UpperCamelCase = np.divide(A_ , A_ ) if input_length < x.shape[0]: UpperCamelCase = padding_value # make sure array is in float32 UpperCamelCase = x.astype(np.floataa ) return x def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[np.ndarray]: '''simple docstring''' UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A_ , A_ , self.padding_value ) for x, n in zip(A_ , A_ )] def __call__( self , A_ , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , )-> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) UpperCamelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) UpperCamelCase = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): UpperCamelCase = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase = [raw_speech] # extract fbank features UpperCamelCase = [self._extract_mfsc_features(A_ ) for one_waveform in raw_speech] # convert into correct format for padding UpperCamelCase = BatchFeature({'input_features': features} ) UpperCamelCase = self.pad( A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , ) # make sure list is in array format UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , A_ ): UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in input_features] UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: UpperCamelCase = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: UpperCamelCase = ( np.array(A_ , dtype=np.intaa ) if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=A_ ) if return_tensors is not None: UpperCamelCase = padded_inputs.convert_to_tensors(A_ ) return padded_inputs
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _SCREAMING_SNAKE_CASE (): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowerCAmelCase = '__test_patch_submodule_mock__' with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _SCREAMING_SNAKE_CASE (): assert _test_patching.open is open lowerCAmelCase = '__test_patch_submodule_builtin_mock__' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , 'open' , _UpperCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): # pandas.read_csv is not present in _test_patching lowerCAmelCase = '__test_patch_submodule_missing_mock__' with patch_submodule(_test_patching , 'pandas.read_csv' , _UpperCAmelCase ): pass def _SCREAMING_SNAKE_CASE (): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point lowerCAmelCase = '__test_patch_submodule_missing_builtin_mock__' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , 'len' , _UpperCAmelCase ) is None with patch_submodule(_test_patching , 'len' , _UpperCAmelCase ): assert _test_patching.len is mock assert _test_patching.len is len def _SCREAMING_SNAKE_CASE (): 
lowerCAmelCase = '__test_patch_submodule_start_and_stop_mock__' lowerCAmelCase = patch_submodule(_test_patching , 'open' , _UpperCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowerCAmelCase = '__test_patch_submodule_successive_join__' lowerCAmelCase = '__test_patch_submodule_successive_dirname__' lowerCAmelCase = '__test_patch_submodule_successive_rename__' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '__test_patch_submodule_doesnt_exist_mock__' with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass
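# --- Added illustration (not part of the dataset row above) -----------------
# A standard-library analogue of the behavior the tests above assert for
# patch_submodule: temporarily swapping an attribute and restoring it on exit.
# This uses unittest.mock rather than datasets' internal helper, purely to
# show the patch/restore pattern the asserts check.
import os
from unittest import mock

original_join = os.path.join
with mock.patch("os.path.join", lambda *parts: "<patched>"):
    assert os.path.join("a", "b") == "<patched>"
assert os.path.join is original_join  # the patch is undone when the context exits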
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
'''simple docstring''' import argparse import os import re import packaging.version _lowercase = """examples/""" _lowercase = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } _lowercase = { """init""": """src/diffusers/__init__.py""", """setup""": """setup.py""", } _lowercase = """README.md""" def A (__lowerCamelCase :Any , __lowerCamelCase :Optional[int] , __lowerCamelCase :Optional[Any] ): with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase = f.read() _lowerCAmelCase , _lowerCAmelCase = REPLACE_PATTERNS[pattern] _lowerCAmelCase = replace.replace("""VERSION""" , __lowerCamelCase ) _lowerCAmelCase = re_pattern.sub(__lowerCamelCase , __lowerCamelCase ) with open(__lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCamelCase ) def A (__lowerCamelCase :Optional[int] ): for folder, directories, fnames in os.walk(__lowerCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , pattern="""examples""" ) def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if not patch: update_version_in_examples(__lowerCamelCase ) def A (): _lowerCAmelCase = """🤗 Transformers currently provides the following architectures""" _lowerCAmelCase = """1. Want to contribute a new model?""" with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase = f.readlines() # Find the start of the list. _lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _lowerCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _lowerCAmelCase = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(__lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCamelCase ) def A (): with open(REPLACE_FILES["""init"""] , """r""" ) as f: _lowerCAmelCase = f.read() _lowerCAmelCase = REPLACE_PATTERNS["""init"""][0].search(__lowerCamelCase ).groups()[0] return packaging.version.parse(__lowerCamelCase ) def A (__lowerCamelCase :Union[str, Any]=False ): _lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _lowerCAmelCase = default_version.base_version elif patch: _lowerCAmelCase = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: _lowerCAmelCase = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. _lowerCAmelCase = input(f'Which version are you releasing? [{default_version}]' ) if len(__lowerCamelCase ) == 0: _lowerCAmelCase = default_version print(f'Updating version to {version}.' ) global_version_update(__lowerCamelCase , patch=__lowerCamelCase ) def A (): _lowerCAmelCase = get_version() _lowerCAmelCase = f'{current_version.major}.{current_version.minor + 1}.0.dev0' _lowerCAmelCase = current_version.base_version # Check with the user we got that right. _lowerCAmelCase = input(f'Which version are we developing now? [{dev_version}]' ) if len(__lowerCamelCase ) == 0: _lowerCAmelCase = dev_version print(f'Updating version to {version}.' ) global_version_update(__lowerCamelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") _lowercase = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
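# --- Added illustration (not part of the dataset row above) -----------------
# Self-contained sketch of how a REPLACE_PATTERNS entry rewrites a version
# string; the pattern/replacement pair is copied from the "init" entry above
# and applied to a dummy source snippet, mirroring update_version_in_file.
import re

re_pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
replace = "__version__ = \"VERSION\"\n".replace("VERSION", "0.18.0")
code = '__version__ = "0.17.0.dev0"\n'
print(re_pattern.sub(replace, code))  # __version__ = "0.18.0"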
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
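# --- Added illustration (not part of the dataset row above) -----------------
# Hedged usage sketch for the pipeline classes above via the public
# transformers factory; left commented out because it downloads model weights
# at call time, so it is illustrative rather than something to run in CI.
#
# from transformers import pipeline
#
# summarizer = pipeline("summarization")
# print(summarizer("Long article text ...", max_length=50, min_length=10))
#
# translator = pipeline("translation_en_to_fr")
# print(translator("How are you?"))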
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask _lowerCamelCase = logging.getLogger(__name__) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = "token-classification" def __init__( self :Dict , __A :int ) -> Union[str, Any]: """simple docstring""" if type(__A ) == dict: SCREAMING_SNAKE_CASE__ = Namespace(**__A ) SCREAMING_SNAKE_CASE__ = import_module("""tasks""" ) try: SCREAMING_SNAKE_CASE__ = getattr(__A , hparams.task_type ) SCREAMING_SNAKE_CASE__ = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) SCREAMING_SNAKE_CASE__ = self.token_classification_task.get_labels(hparams.labels ) SCREAMING_SNAKE_CASE__ = CrossEntropyLoss().ignore_index super().__init__(__A , len(self.labels ) , self.mode ) def _snake_case ( self :Tuple , **__A :Optional[Any] ) -> List[str]: """simple docstring""" return self.model(**__A ) def _snake_case ( self :int , __A :List[Any] , __A :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": SCREAMING_SNAKE_CASE__ = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids SCREAMING_SNAKE_CASE__ = self(**__A ) SCREAMING_SNAKE_CASE__ = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def _snake_case ( self :List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.hparams for mode in ["train", "dev", "test"]: SCREAMING_SNAKE_CASE__ = self._feature_file(__A ) if os.path.exists(__A ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __A ) SCREAMING_SNAKE_CASE__ = torch.load(__A ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) SCREAMING_SNAKE_CASE__ = self.token_classification_task.read_examples_from_file(args.data_dir , __A ) SCREAMING_SNAKE_CASE__ = self.token_classification_task.convert_examples_to_features( __A , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__A , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , __A ) torch.save(__A , __A ) def _snake_case ( self :Any , __A :int , __A :int , __A :bool = False ) -> DataLoader: """simple docstring""" SCREAMING_SNAKE_CASE__ = self._feature_file(__A ) logger.info("""Loading features from cached file %s""" , __A ) SCREAMING_SNAKE_CASE__ = torch.load(__A ) SCREAMING_SNAKE_CASE__ = torch.tensor([f.input_ids for f in 
features] , dtype=torch.long ) SCREAMING_SNAKE_CASE__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: SCREAMING_SNAKE_CASE__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: SCREAMING_SNAKE_CASE__ = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) SCREAMING_SNAKE_CASE__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(__A , __A , __A , __A ) , batch_size=__A ) def _snake_case ( self :List[Any] , __A :Optional[Any] , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" """Compute validation""" "" SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": SCREAMING_SNAKE_CASE__ = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids SCREAMING_SNAKE_CASE__ = self(**__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs[:2] SCREAMING_SNAKE_CASE__ = logits.detach().cpu().numpy() SCREAMING_SNAKE_CASE__ = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _snake_case ( self :Optional[int] , __A :List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean() SCREAMING_SNAKE_CASE__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.argmax(__A , axis=2 ) SCREAMING_SNAKE_CASE__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) SCREAMING_SNAKE_CASE__ = dict(enumerate(self.labels ) ) SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) SCREAMING_SNAKE_CASE__ = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(__A , __A ), """precision""": precision_score(__A , __A ), """recall""": recall_score(__A , __A ), """f1""": fa_score(__A , __A ), } SCREAMING_SNAKE_CASE__ = dict(results.items() ) SCREAMING_SNAKE_CASE__ = results return ret, preds_list, out_label_list def _snake_case ( self :str , __A :List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(__A ) SCREAMING_SNAKE_CASE__ = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _snake_case ( self :Any , __A :List[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(__A ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 SCREAMING_SNAKE_CASE__ = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _snake_case ( __A :Optional[int] , __A :int ) -> Tuple: """simple docstring""" BaseTransformer.add_model_specific_args(__A , __A ) parser.add_argument( """--task_type""" , default="""NER""" , type=__A , 
help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=__A , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=__A , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=__A , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) _lowerCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd()) _lowerCamelCase = parser.parse_args() _lowerCamelCase = NERTransformer(args) _lowerCamelCase = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 _lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True)) _lowerCamelCase = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
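# --- Added illustration (not part of the dataset row above) -----------------
# Minimal sketch of the encode/decode round trip the slow tests above
# exercise, reusing the dummy checkpoint from the loading test; left commented
# out because it downloads weights from the Hub.
#
# import torch
# from diffusers import AutoencoderKL
#
# vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
# image = torch.randn(1, 3, 32, 32)
# with torch.no_grad():
#     latents = vae.encode(image).latent_dist.sample()
#     reconstruction = vae.decode(latents).sample
# print(latents.shape, reconstruction.shape)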
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = XLMProphetNetTokenizer UpperCAmelCase : Tuple = False UpperCAmelCase : List[str] = True def lowerCAmelCase_ ( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing _A = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self : Any ): _A = '[PAD]' _A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 1_012 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_012 ) def lowerCAmelCase_ ( self : List[str] ): _A = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) _A = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _A = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def lowerCAmelCase_ ( self : Any ): return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def lowerCAmelCase_ ( self : Tuple ): _A = 'Hello World!' 
_A = [35_389, 6_672, 49, 2] self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): # fmt: off _A = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
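# --- Added illustration (not part of the dataset row above) -----------------
# Hedged round-trip sketch for the integration check above; commented out
# because it downloads the real SentencePiece checkpoint. The expected ids
# come straight from the @slow test.
#
# from transformers import XLMProphetNetTokenizer
#
# tok = XLMProphetNetTokenizer.from_pretrained(
#     "microsoft/xprophetnet-large-wiki100-cased")
# ids = tok.encode("Hello World!")
# print(ids)                          # [35389, 6672, 49, 2] per the test above
# print(tok.convert_ids_to_tokens(ids))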
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ : Tuple = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[Any] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = "left" # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE = tokenizer.eos_token 
__SCREAMING_SNAKE_CASE = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little", "Today, I", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs["input_ids"].to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , ) __SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = "multi_label_classification" __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = 4_2_3_8_4 __SCREAMING_SNAKE_CASE = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( **UpperCAmelCase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
682
0
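# A minimal sketch of the invariant the BioGpt cache tests above assert: logits
# computed incrementally with past_key_values must match a single full forward
# pass. The tiny hub model id below is an illustrative stand-in (any small causal
# LM would do), not something taken from the sample itself.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()

ids = tok("the cache must not change the outputs", return_tensors="pt").input_ids
with torch.no_grad():
    full = model(ids).logits[:, -1]                                # one pass over the whole sequence
    past = model(ids[:, :-1], use_cache=True).past_key_values      # cache everything but the last token
    step = model(ids[:, -1:], past_key_values=past).logits[:, -1]  # feed only the last token
print(torch.allclose(full, step, atol=1e-3))  # True: cached == non-cached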
'''simple docstring'''
import unittest

from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase):
    @require_torch
    def SCREAMING_SNAKE_CASE(self):
        '''simple docstring'''
        __A : List[str] = pipeline(
            task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused')
        __A : str = load_dataset('ashraq/esc50')
        __A : str = dataset['train']['audio'][-1]['array']
        __A : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(_UpperCAmelCase),
            [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}],
        )

    @unittest.skip('No models are available in TF')
    def SCREAMING_SNAKE_CASE(self):
        '''simple docstring'''
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE(self):
        '''simple docstring'''
        __A : List[str] = pipeline(
            task='zero-shot-audio-classification',
            model='laion/clap-htsat-unfused',
        )
        # This is an audio of a dog
        __A : Any = load_dataset('ashraq/esc50')
        __A : Dict = dataset['train']['audio'][-1]['array']
        __A : Tuple = audio_classifier(_UpperCAmelCase, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(_UpperCAmelCase),
            [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ],
        )
        __A : Tuple = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(_UpperCAmelCase),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )
        __A : List[str] = audio_classifier(
            [audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5)
        self.assertEqual(
            nested_simplify(_UpperCAmelCase),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )

    @unittest.skip('No models are available in TF')
    def SCREAMING_SNAKE_CASE(self):
        '''simple docstring'''
        pass
8
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
682
0
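# How the metric regexes defined above pull numbers out of trainer log lines;
# the log line here is made up for illustration.
import re

line = "train_runtime = 123.4"
print(re.search(r"train_runtime.*=\D*(.*?)$", line).group(1))  # -> '123.4'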
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def A(__UpperCamelCase) -> List[str]:
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0X4e00 and cp <= 0X9fff)
        or (cp >= 0X3400 and cp <= 0X4dbf)  #
        or (cp >= 0X20000 and cp <= 0X2a6df)  #
        or (cp >= 0X2a700 and cp <= 0X2b73f)  #
        or (cp >= 0X2b740 and cp <= 0X2b81f)  #
        or (cp >= 0X2b820 and cp <= 0X2ceaf)  #
        or (cp >= 0Xf900 and cp <= 0Xfaff)
        or (cp >= 0X2f800 and cp <= 0X2fa1f)  #
    ):  #
        return True

    return False


def A(__UpperCamelCase) -> str:
    # word like '180' or '身高' or '神'
    for char in word:
        A__ = ord(__UpperCamelCase)
        if not _is_chinese_char(__UpperCamelCase):
            return 0
    return 1


def A(__UpperCamelCase) -> str:
    A__ = set()
    for token in tokens:
        A__ = len(__UpperCamelCase) > 1 and is_chinese(__UpperCamelCase)
        if chinese_word:
            word_set.add(__UpperCamelCase)
    A__ = list(__UpperCamelCase)
    return word_list


def A(__UpperCamelCase, __UpperCamelCase) -> str:
    if not chinese_word_set:
        return bert_tokens
    A__ = max([len(__UpperCamelCase) for w in chinese_word_set])
    A__ = bert_tokens
    A__ , A__ = 0, len(__UpperCamelCase)
    while start < end:
        A__ = True
        if is_chinese(bert_word[start]):
            A__ = min(end - start, __UpperCamelCase)
            for i in range(__UpperCamelCase, 1, -1):
                A__ = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        A__ = '##' + bert_word[j]
                    A__ = start + i
                    A__ = False
                    break
        if single_word:
            start += 1
    return bert_word


def A(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase) -> Optional[int]:
    A__ = []
    for i in range(0, len(__UpperCamelCase), 100):
        A__ = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        A__ = [get_chinese_word(__UpperCamelCase) for r in res]
        ltp_res.extend(__UpperCamelCase)
    assert len(__UpperCamelCase) == len(__UpperCamelCase)

    A__ = []
    for i in range(0, len(__UpperCamelCase), 100):
        A__ = bert_tokenizer(lines[i : i + 100], add_special_tokens=__UpperCamelCase, truncation=__UpperCamelCase, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(__UpperCamelCase) == len(__UpperCamelCase)

    A__ = []
    for input_ids, chinese_word in zip(__UpperCamelCase, __UpperCamelCase):
        A__ = []
        for id in input_ids:
            A__ = bert_tokenizer._convert_id_to_token(__UpperCamelCase)
            input_tokens.append(__UpperCamelCase)
        A__ = add_sub_symbol(__UpperCamelCase, __UpperCamelCase)
        A__ = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(__UpperCamelCase):
            if token[:2] == "##":
                A__ = token[2:]
                # save chinese tokens' pos
                if len(__UpperCamelCase) == 1 and _is_chinese_char(ord(__UpperCamelCase)):
                    ref_id.append(__UpperCamelCase)
        ref_ids.append(__UpperCamelCase)
    assert len(__UpperCamelCase) == len(__UpperCamelCase)

    return ref_ids


def A(__UpperCamelCase) -> List[str]:
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        A__ = f.readlines()
    A__ = [line.strip() for line in data if len(__UpperCamelCase) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    A__ = LTP(args.ltp)  # faster in GPU device
    A__ = BertTokenizer.from_pretrained(args.bert)
    A__ = prepare_ref(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        A__ = [json.dumps(__UpperCamelCase) + '\n' for ref in ref_ids]
        f.writelines(__UpperCamelCase)


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description='''prepare_chinese_ref''')
    parser.add_argument(
        '''--file_name''',
        required=False,
        type=str,
        default='''./resources/chinese-demo.txt''',
        help='''file need process, same as training data in lm''',
    )
    parser.add_argument(
        '''--ltp''',
        required=False,
        type=str,
        default='''./resources/ltp''',
        help='''resources for LTP tokenizer, usually a path''',
    )
    parser.add_argument(
        '''--bert''',
        required=False,
        type=str,
        default='''./resources/robert''',
        help='''resources for Bert tokenizer''',
    )
    parser.add_argument(
        '''--save_path''',
        required=False,
        type=str,
        default='''./resources/ref.txt''',
        help='''path to save res''',
    )
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    main(args)
9
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
682
0
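# The CJK-range check used by _is_chinese_char above, restated so it can be run
# standalone; the codepoint ranges are exactly the ones listed in the sample.
def is_cjk(ch):
    cp = ord(ch)
    return any(lo <= cp <= hi for lo, hi in [
        (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
    ])

print([is_cjk(c) for c in "神a1"])  # [True, False, False]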
_lowerCAmelCase = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
_lowerCAmelCase = {value: key for key, value in encode_dict.items()}


def _snake_case(__snake_case):
    _UpperCamelCase = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''')
    return encoded


def _snake_case(__snake_case):
    if set(__snake_case) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''')
    _UpperCamelCase = ''''''
    for word in coded.split():
        while len(__snake_case) != 0:
            decoded += decode_dict[word[:5]]
            _UpperCamelCase = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
10
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
682
0
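# A tiny worked example of the counting-sort steps in the sample above: count
# occurrences, prefix-sum the counts, then place elements stably from the end.
coll = [4, 2, 2, 8, 3, 3, 1]
lo, hi = min(coll), max(coll)
counts = [0] * (hi - lo + 1)
for x in coll:
    counts[x - lo] += 1
for i in range(1, len(counts)):
    counts[i] += counts[i - 1]   # counts[i] = how many elements are <= lo + i
out = [0] * len(coll)
for x in reversed(coll):         # walking backwards keeps equal elements in order
    counts[x - lo] -= 1
    out[counts[x - lo]] = x
print(out)  # [1, 2, 2, 3, 3, 4, 8]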
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)


class __A(A):
    '''simple docstring'''

    __lowerCamelCase : Optional[int] = 'timm_backbone'

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **A) -> Tuple:
        """simple docstring"""
        super().__init__(**A)
        _a = backbone
        _a = num_channels
        _a = features_only
        _a = use_pretrained_backbone
        _a = True
        _a = out_indices if out_indices is not None else (-1,)
11
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
682
0
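# A hand-rolled sketch of the lazy-import trick behind the _LazyModule call in the
# __init__ above (not transformers' actual implementation): attribute access
# triggers the real submodule import and caches the result, so importing the
# package itself stays cheap.
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value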
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

lowerCamelCase__ : Optional[Any] = {
    """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}


class _snake_case(UpperCAmelCase_):
    __lowerCAmelCase : int = 'open-llama'

    def __init__(
        self,
        SCREAMING_SNAKE_CASE_=10_00_00,
        SCREAMING_SNAKE_CASE_=40_96,
        SCREAMING_SNAKE_CASE_=1_10_08,
        SCREAMING_SNAKE_CASE_=32,
        SCREAMING_SNAKE_CASE_=32,
        SCREAMING_SNAKE_CASE_="silu",
        SCREAMING_SNAKE_CASE_=20_48,
        SCREAMING_SNAKE_CASE_=0.0_2,
        SCREAMING_SNAKE_CASE_=1E-6,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=0,
        SCREAMING_SNAKE_CASE_=1,
        SCREAMING_SNAKE_CASE_=2,
        SCREAMING_SNAKE_CASE_=False,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=0.1,
        SCREAMING_SNAKE_CASE_=0.1,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=None,
        **SCREAMING_SNAKE_CASE_,
    ):
        '''simple docstring'''
        lowercase__ : str = vocab_size
        lowercase__ : Tuple = max_position_embeddings
        lowercase__ : Tuple = hidden_size
        lowercase__ : Tuple = intermediate_size
        lowercase__ : Any = num_hidden_layers
        lowercase__ : Tuple = num_attention_heads
        lowercase__ : Any = hidden_act
        lowercase__ : Optional[int] = initializer_range
        lowercase__ : Union[str, Any] = rms_norm_eps
        lowercase__ : Optional[Any] = use_cache
        lowercase__ : Dict = kwargs.pop(
            """use_memorry_efficient_attention""", SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = hidden_dropout_prob
        lowercase__ : Optional[int] = attention_dropout_prob
        lowercase__ : Optional[int] = use_stable_embedding
        lowercase__ : Dict = shared_input_output_embedding
        lowercase__ : str = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=SCREAMING_SNAKE_CASE_,
            bos_token_id=SCREAMING_SNAKE_CASE_,
            eos_token_id=SCREAMING_SNAKE_CASE_,
            tie_word_embeddings=SCREAMING_SNAKE_CASE_,
            **SCREAMING_SNAKE_CASE_,
        )

    def lowercase__(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, SCREAMING_SNAKE_CASE_) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f'got {self.rope_scaling}')
        lowercase__ : int = self.rope_scaling.get("""type""", SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[int] = self.rope_scaling.get("""factor""", SCREAMING_SNAKE_CASE_)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
12
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
682
0
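# The rope_scaling check above, restated as a standalone function (same rules,
# no transformers import needed) so the accepted shape of the dict is explicit.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, got {rope_scaling}")
    rtype, factor = rope_scaling.get("type"), rope_scaling.get("factor")
    if rtype not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {rtype}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})   # passes silently
# validate_rope_scaling({"type": "cubic", "factor": 2.0})  # would raise ValueError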
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=False ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('module.cls_token', 'vit.embeddings.cls_token'), ('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('module.pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('module.norm.weight', 'layernorm.weight'), ('module.norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowerCamelCase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=False ) -> Optional[int]: for i in range(config.num_hidden_layers ): if base_model: __lowerCamelCase : List[str] = '' else: __lowerCamelCase : List[Any] = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase : Optional[Any] = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' ) __lowerCamelCase : Optional[Any] = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] __lowerCamelCase : Dict = in_proj_bias[: config.hidden_size] __lowerCamelCase : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> List[str]: __lowerCamelCase : Any = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : Any ) -> Tuple: # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. __lowerCamelCase : List[Any] = [ 'module.fc.fc1.weight', 'module.fc.fc1.bias', 'module.fc.bn1.weight', 'module.fc.bn1.bias', 'module.fc.bn1.running_mean', 'module.fc.bn1.running_var', 'module.fc.bn1.num_batches_tracked', 'module.fc.fc2.weight', 'module.fc.fc2.bias', 'module.fc.bn2.weight', 'module.fc.bn2.bias', 'module.fc.bn2.running_mean', 'module.fc.bn2.running_var', 'module.fc.bn2.num_batches_tracked', 'module.fc.fc3.weight', 'module.fc.fc3.bias', ] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ) -> str: __lowerCamelCase : Any = dct.pop(UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = val def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ) -> str: __lowerCamelCase : Dict = ViTMSNConfig() __lowerCamelCase : Dict = 10_00 __lowerCamelCase : Optional[Any] = 'datasets/huggingface/label-files' __lowerCamelCase : Tuple = 'imagenet-1k-id2label.json' __lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) ) __lowerCamelCase : Tuple = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()} __lowerCamelCase : Any = idalabel __lowerCamelCase : str = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: __lowerCamelCase : Union[str, Any] = 3_84 __lowerCamelCase : Tuple = 15_36 __lowerCamelCase : int = 6 elif "l16" in checkpoint_url: __lowerCamelCase : Any = 10_24 __lowerCamelCase : int = 40_96 __lowerCamelCase : int = 24 __lowerCamelCase : Any = 16 __lowerCamelCase : List[str] = 0.1 elif "b4" in checkpoint_url: __lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: __lowerCamelCase : str = 7 __lowerCamelCase : Union[str, Any] = 10_24 __lowerCamelCase : List[Any] = 40_96 __lowerCamelCase : Union[str, Any] = 24 __lowerCamelCase : Optional[Any] = 16 __lowerCamelCase : Optional[Any] = 0.1 __lowerCamelCase : Optional[Any] = ViTMSNModel(UpperCAmelCase_ ) __lowerCamelCase : int = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' )['target_encoder'] __lowerCamelCase : List[Any] = ViTImageProcessor(size=config.image_size ) remove_projection_head(UpperCAmelCase_ ) __lowerCamelCase : List[Any] = create_rename_keys(UpperCAmelCase_ , base_model=UpperCAmelCase_ ) for src, dest in rename_keys: rename_key(UpperCAmelCase_ , 
UpperCAmelCase_ , UpperCAmelCase_ ) read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , base_model=UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ ) model.eval() __lowerCamelCase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCamelCase : Optional[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ) __lowerCamelCase : str = ViTImageProcessor( size=config.image_size , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ ) __lowerCamelCase : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ) # forward pass torch.manual_seed(2 ) __lowerCamelCase : int = model(**UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: __lowerCamelCase : Optional[Any] = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: __lowerCamelCase : List[Any] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: __lowerCamelCase : str = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: __lowerCamelCase : int = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: __lowerCamelCase : Dict = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase_ , atol=1e-4 ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCAmelCase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": A__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) A__ : int = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
13
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
682
0
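# The ViT-MSN conversion above slices timm's fused qkv projection into separate
# query/key/value weights; the same slicing in miniature:
import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv_weight[:hidden, :]              # first `hidden` rows -> query
k = qkv_weight[hidden : 2 * hidden, :]  # middle rows -> key
v = qkv_weight[-hidden:, :]             # last rows -> value
assert torch.equal(torch.cat([q, k, v]), qkv_weight)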
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


a__ = [
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
    {'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
    {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
    {'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
    {'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]


def __UpperCAmelCase(__a : List[Any]=True) -> str:
    """simple docstring"""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase))
class UpperCAmelCase_(__lowercase):
    """simple docstring"""

    UpperCAmelCase__ : str = None
    UpperCAmelCase__ : List[str] = None

    def __lowercase(self, _a, _a) -> Optional[Any]:
        with TemporaryDirectory() as tmp_dir:
            _a : Dict = dataset_module_factory(_a, cache_dir=_a)
            _a : Union[str, Any] = import_main_class(dataset_module.module_path, dataset=_a)
            _a : DatasetBuilder = builder_cls(
                cache_dir=_a,
                config_name=_a,
                hash=dataset_module.hash,
            )
            _a : int = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=_a).replace(os.sep, '''/'''),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            _a : Optional[int] = cached_path(_a, cache_dir=_a)
            self.assertTrue(os.path.exists(_a))


@pytest.mark.integration
def __UpperCAmelCase(__a : Optional[int]) -> List[Any]:
    """simple docstring"""
    _a : List[str] = tmp_path_factory.mktemp('''test_hf_gcp''') / '''test_wikipedia_simple'''
    _a : List[str] = dataset_module_factory('''wikipedia''', cache_dir=__a)
    _a : Tuple = import_main_class(dataset_module.module_path)
    _a : DatasetBuilder = builder_cls(
        cache_dir=__a,
        config_name='''20220301.frr''',
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    _a : int = None
    builder_instance.download_and_prepare()
    _a : Tuple = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def __UpperCAmelCase(__a : Optional[Any]) -> Optional[Any]:
    """simple docstring"""
    _a : Optional[int] = dataset_module_factory('''wikipedia''', cache_dir=__a)
    _a : Optional[int] = import_main_class(dataset_module.module_path, dataset=__a)
    _a : DatasetBuilder = builder_cls(
        cache_dir=__a,
        config_name='''20220301.frr''',
        hash=dataset_module.hash,
    )
    _a : Dict = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(__a, __a)
    assert "train" in ds
    assert isinstance(ds['''train'''], __a)
    assert next(iter(ds['''train''']))
14
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : str = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever A : Tuple = logging.getLogger(__name__) class A ( UpperCAmelCase__ ): '''simple docstring''' def __init__(self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any=None ) -> Optional[int]: """simple docstring""" super().__init__( _UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , ) lowercase__ = None def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : int ) -> Union[str, Any]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually lowercase__ = self._infer_socket_ifname() # avoid clash with the NCCL port lowercase__ = str(distributed_port + 1 ) lowercase__ = dist.new_group(ranks=_UpperCAmelCase , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowerCamelCase__ (self : Optional[int] ) -> str: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple=torch.floataa ) -> Tuple: """simple docstring""" lowercase__ = torch.empty(_UpperCAmelCase , dtype=_UpperCAmelCase ) dist.scatter(_UpperCAmelCase , src=0 , scatter_list=_UpperCAmelCase , group=self.process_group ) return target_tensor def lowerCamelCase__ (self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names lowercase__ = next((addr for addr in addrs if addr.startswith("""e""" )) , _UpperCAmelCase ) return ifname def lowerCamelCase__ (self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): lowercase__ , lowercase__ = self._main_retrieve(_UpperCAmelCase , _UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCAmelCase ) # distributed training lowercase__ = dist.get_world_size(group=self.process_group ) # gather logic lowercase__ = None if self._is_main(): lowercase__ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCAmelCase )] dist.gather(torch.tensor(_UpperCAmelCase ) , dst=0 , gather_list=_UpperCAmelCase , group=self.process_group ) # scatter logic lowercase__ = question_hidden_states.shape[0] lowercase__ = [] lowercase__ = [] if self._is_main(): assert len(_UpperCAmelCase ) == world_size lowercase__ , lowercase__ = self._main_retrieve(torch.cat(_UpperCAmelCase ).numpy() , _UpperCAmelCase ) lowercase__ , lowercase__ = torch.tensor(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase ) lowercase__ = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = 
self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = self._scattered(_UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) lowercase__ = self._scattered(_UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCAmelCase )
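The gather/scatter dance above (rank 0 collects every worker's question states, retrieves once, and scatters each worker's slice back over a gloo group) can be exercised in isolation. A small sketch, assuming a gloo backend and a launch such as `torchrun --nproc_per_node=2 demo.py`:

import torch
import torch.distributed as dist

def main():
    dist.init_process_group("gloo")
    rank, world = dist.get_rank(), dist.get_world_size()
    queries = torch.full((2, 4), float(rank))  # each rank's fake hidden states
    gathered = [torch.empty_like(queries) for _ in range(world)] if rank == 0 else None
    dist.gather(queries, gather_list=gathered, dst=0)
    if rank == 0:
        results = torch.cat(gathered) * 10   # stand-in for the actual retrieval
        chunks = list(results.chunk(world))  # one slice per rank, like _chunk_tensor
    else:
        chunks = None
    out = torch.empty_like(queries)
    dist.scatter(out, scatter_list=chunks, src=0)
    print(rank, out)

if __name__ == "__main__":
    main()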
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Dict = logging.get_logger(__name__) # General docstring a__ : str = '''RegNetConfig''' # Base docstring a__ : List[str] = '''facebook/regnet-y-040''' a__ : int = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : int = '''facebook/regnet-y-040''' a__ : str = '''tabby, tabby cat''' a__ : Optional[Any] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any: super().__init__(**UpperCAmelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_channels __SCREAMING_SNAKE_CASE = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) ) __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ ) class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) __SCREAMING_SNAKE_CASE = [ tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) for layer_module in self.attention: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = hidden_state * pooled return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __SCREAMING_SNAKE_CASE = [ # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ), *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] # based on `downsample_in_first_stage`, the first layer of the first stage may 
or may not downsample the input self.stages.append( TFRegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention: __SCREAMING_SNAKE_CASE = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) __SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ ) if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) @keras_serializable class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" snake_case__ : Any = RegNetConfig def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" ) __SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) @unpack_inputs def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder_outputs[0] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) # Change to NCHW output format have uniformity in the modules __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : List[Any] = RegNetConfig snake_case__ : List[str] = "regnet" snake_case__ : str = "pixel_values" @property def UpperCAmelCase_ ( self : 
Optional[Any] ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ : Union[str, Any] = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ : Optional[int] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_labels __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) # classification head __SCREAMING_SNAKE_CASE = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] __SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ ) if not return_dict: __SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
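The recurring building block in the model above is pad -> conv -> batch norm -> activation, with inputs transposed from NCHW to NHWC before the first convolution. A readable standalone sketch of that block (names are illustrative, not the library's):

import tensorflow as tf

class ConvBlock(tf.keras.layers.Layer):
    def __init__(self, filters, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # Explicit zero padding so a stride-1 conv preserves spatial size.
        self.pad = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.conv = tf.keras.layers.Conv2D(
            filters, kernel_size, strides=stride, padding="valid",
            groups=groups, use_bias=False)
        self.norm = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9)
        self.act = tf.keras.activations.get(activation)

    def call(self, x):
        return self.act(self.norm(self.conv(self.pad(x))))

x = tf.random.normal((1, 224, 224, 3))   # NHWC, as Keras conv layers expect
print(ConvBlock(32, stride=2)(x).shape)  # (1, 112, 112, 32)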
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : List[Any] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
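transformers' `_LazyModule` above defers the heavy framework imports until an attribute is actually touched. The standard library offers the same effect through `importlib.util.LazyLoader`; this is the recipe from the importlib documentation, shown here for comparison:

import importlib.util
import sys

def lazy_import(name):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)  # registers the module but defers execution
    return module

json = lazy_import("json")   # nothing has been executed yet
print(json.dumps({"a": 1}))  # first attribute access triggers the real import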
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
import math class lowerCamelCase_ : def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1 __A : List[str] = n __A : List[str] = [ [math.inf for j in range(0 , __A )] for i in range(0 , __A ) ] # adjacency matrix for weight __A : str = [ [math.inf for j in range(0 , __A )] for i in range(0 , __A ) ] # dp[i][j] stores minimum distance from i to j def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ): __A : List[Any] = w def lowerCAmelCase_ ( self : Union[str, Any] ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): __A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ): return self.dp[u][v] if __name__ == "__main__": UpperCAmelCase_ : Tuple = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
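Under the renaming in the sample above, the inner assignment writes back into `self.dp[i][j]`; the triple loop is the classic Floyd-Warshall relaxation through every intermediate vertex k. The same algorithm as a compact standalone function:

import math

def floyd_warshall(weights):
    """All-pairs shortest paths; weights[i][j] is the edge weight or math.inf."""
    n = len(weights)
    dist = [row[:] for row in weights]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

INF = math.inf
w = [[0, 3, INF],
     [INF, 0, 1],
     [7, INF, 0]]
print(floyd_warshall(w)[0][2])  # 4: going 0 -> 1 -> 2 beats any direct route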
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = max_length __SCREAMING_SNAKE_CASE = max_position_embeddings @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Optional[int] ) -> bool: __SCREAMING_SNAKE_CASE = input_ids.shape[-1] __SCREAMING_SNAKE_CASE = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = start_length __SCREAMING_SNAKE_CASE = max_new_tokens __SCREAMING_SNAKE_CASE = start_length + max_new_tokens @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[float] = None ) -> Dict: __SCREAMING_SNAKE_CASE = max_time __SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Tuple , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : str ) -> bool: return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Dict , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[str] ) -> bool: return any(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) for criteria in self ) @property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: for stopping_criterium in self: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length return None def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = stopping_criteria.max_length __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , lowerCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) ) return new_stopping_criteria
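Writing a custom criterion in the same shape as the classes above is straightforward. A sketch, assuming the API version shown here, where the criteria list reduces its members with any(...):

import torch
from transformers import StoppingCriteria, StoppingCriteriaList

class StopAtLength(StoppingCriteria):
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length

criteria = StoppingCriteriaList([StopAtLength(8)])
ids = torch.ones((1, 8), dtype=torch.long)
print(criteria(ids, None))  # True: the sequence has reached the cap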
'''simple docstring''' import functools def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) @functools.cache def min_distance(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa _lowerCAmelCase = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , SCREAMING_SNAKE_CASE_ ) , 1 + min_distance(SCREAMING_SNAKE_CASE_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
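The renaming in this sample collapses the two word parameters into one identifier; with distinct names, the memoized top-down edit distance reads as below. The three recursive branches are delete, insert, and substitute/match:

import functools

def edit_distance(a: str, b: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i == len(a):
            return len(b) - j                    # only insertions remain
        if j == len(b):
            return len(a) - i                    # only deletions remain
        diff = int(a[i] != b[j])
        return min(1 + go(i + 1, j),             # delete a[i]
                   1 + go(i, j + 1),             # insert b[j]
                   diff + go(i + 1, j + 1))      # substitute or match
    return go(0, 0)

print(edit_distance("kitten", "sitting"))  # 3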
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int = RoCBertTokenizer snake_case__ : int = None snake_case__ : Optional[Any] = False snake_case__ : int = True snake_case__ : Any = filter_non_english def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: super().setUp() __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} for i, value in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase_ ( self : str ) -> List[str]: __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase_ ( self : List[Any] ) -> str: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False __SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = ["的", "人", "有"] __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = "你好,你是谁" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
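The wordpiece cases exercised above ("unwanted" -> ['un', '##want', '##ed'], an out-of-vocabulary suffix -> ['[UNK]']) come from a greedy longest-match-first split. A tiny standalone version of that loop:

def wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:          # try the longest remaining piece first
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:             # no piece matched: the whole word is unknown
            return [unk]
        tokens.append(cur)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']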
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a = HfApi() _a = {} # fmt: off _a = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) _a = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) _a = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) _a = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) _a = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) _a = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) _a = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) _a = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) _a = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) _a = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) _a = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) _a = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) _a = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 
0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) _a = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) _a = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on _a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("""CompVis"""): _a = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: _a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): _a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
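The script above is a seeded regression test: fix every random source, run one forward pass, and compare a short slice of the output against recorded reference values with torch.allclose and a loose atol. The pattern in miniature:

import torch

torch.manual_seed(0)
expected = torch.randn(3)  # pretend these values were recorded on a known-good run
torch.manual_seed(0)       # the "new" run must reproduce them
actual = torch.randn(3)
assert torch.allclose(actual, expected, atol=1e-3)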
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "vivit" def __init__( self : Dict , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=[2, 1_6, 1_6] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Any=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu_fast" , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : str=1E-06 , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Any , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_frames __SCREAMING_SNAKE_CASE = tubelet_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias super().__init__(**UpperCAmelCase__ )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase: Union[str, Any] = { 'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'], 'tokenization_roformer': ['RoFormerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: Any = ['RoFormerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: Tuple = [ 'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerLayer', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: Optional[int] = [ 'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerLayer', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase: int = [ 'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, 
FlaxRoFormerPreTrainedModel, ) else: import sys _lowerCAmelCase: Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
20
"""simple docstring""" import numpy as np from transformers import Pipeline def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.max(lowerCAmelCase_ , axis=-1 , keepdims=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_ ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Tuple , **UpperCAmelCase__ : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if "second_text" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["second_text"] return preprocess_kwargs, {}, {} def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None ) -> str: return self.tokenizer(UpperCAmelCase__ , text_pair=UpperCAmelCase__ , return_tensors=self.framework ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: return self.model(**UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = model_outputs.logits[0].numpy() __SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.model.config.idalabel[best_class] __SCREAMING_SNAKE_CASE = probabilities[best_class].item() __SCREAMING_SNAKE_CASE = logits.tolist() return {"label": label, "score": score, "logits": logits}
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __A : UpperCamelCase = BlenderbotConfig UpperCamelCase = {} UpperCamelCase = """gelu""" def __init__( self :Union[str, Any] , __snake_case :Union[str, Any] , __snake_case :Union[str, Any]=13 , __snake_case :Optional[Any]=7 , __snake_case :Optional[int]=True , __snake_case :Optional[Any]=False , __snake_case :Dict=99 , __snake_case :List[str]=32 , __snake_case :List[str]=2 , __snake_case :List[str]=4 , __snake_case :List[str]=37 , __snake_case :Any=0.1 , __snake_case :List[str]=0.1 , __snake_case :Union[str, Any]=20 , __snake_case :int=2 , __snake_case :Dict=1 , __snake_case :Any=0 , ): '''simple docstring''' __magic_name__ : Dict =parent __magic_name__ : Dict =batch_size __magic_name__ : Dict =seq_length __magic_name__ : Union[str, Any] =is_training __magic_name__ : int =use_labels __magic_name__ : str =vocab_size __magic_name__ : Optional[int] =hidden_size __magic_name__ : List[Any] =num_hidden_layers __magic_name__ : str =num_attention_heads __magic_name__ : Dict =intermediate_size __magic_name__ : int =hidden_dropout_prob __magic_name__ : Tuple =attention_probs_dropout_prob __magic_name__ : Union[str, Any] =max_position_embeddings __magic_name__ : int =eos_token_id __magic_name__ : Optional[int] =pad_token_id __magic_name__ : Optional[Any] =bos_token_id def A__ ( self :Dict ): '''simple docstring''' __magic_name__ : int =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __magic_name__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __magic_name__ : Tuple =tf.concat([input_ids, eos_tensor] , axis=1 ) __magic_name__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Dict =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __magic_name__ : List[str] =prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case ) return config, inputs_dict def A__ ( self :List[str] , __snake_case :Optional[Any] , __snake_case :Dict ): '''simple docstring''' __magic_name__ : List[str] =TFBlenderbotModel(config=__snake_case ).get_decoder() __magic_name__ : Union[str, Any] =inputs_dict["""input_ids"""] __magic_name__ : Any =input_ids[:1, :] __magic_name__ : List[str] =inputs_dict["""attention_mask"""][:1, :] __magic_name__ : Dict =inputs_dict["""head_mask"""] __magic_name__ : Optional[Any] =1 # first forward pass __magic_name__ : 
Tuple =model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case ) __magic_name__ , __magic_name__ : List[str] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __magic_name__ : Any =ids_tensor((self.batch_size, 3) , config.vocab_size ) __magic_name__ : int =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __magic_name__ : Any =tf.concat([input_ids, next_tokens] , axis=-1 ) __magic_name__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __magic_name__ : Optional[int] =model(__snake_case , attention_mask=__snake_case )[0] __magic_name__ : Dict =model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __magic_name__ : Union[str, Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __magic_name__ : Any =output_from_no_past[:, -3:, random_slice_idx] __magic_name__ : Union[str, Any] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__snake_case , __snake_case , rtol=1E-3 ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ): if attention_mask is None: __magic_name__ : Optional[int] =tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __magic_name__ : List[Any] =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __magic_name__ : Optional[Any] =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __magic_name__ : Tuple =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __magic_name__ : Any =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () UpperCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () UpperCamelCase = ( { """conversational""": TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False def A__ ( self :Union[str, Any] ): '''simple docstring''' __magic_name__ : str =TFBlenderbotModelTester(self ) __magic_name__ : Dict =ConfigTester(self , config_class=__snake_case ) def A__ ( self :Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self :List[str] ): '''simple docstring''' __magic_name__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case ) @require_tokenizers @require_tf class __A ( unittest.TestCase ): UpperCamelCase = ["""My friends are cool but they eat too many carbs."""] UpperCamelCase = """facebook/blenderbot-400M-distill""" @cached_property def A__ ( self :List[str] ): '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : Tuple =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A__ ( self :Union[str, Any] ): '''simple docstring''' __magic_name__ : int =self.tokenizer(self.src_text , return_tensors="""tf""" ) __magic_name__ : Optional[int] =self.model.generate( model_inputs.input_ids , ) __magic_name__ : Union[str, Any] =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
21
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"""{test_file} instead.""" ) __SCREAMING_SNAKE_CASE = components[-1] if not test_fn.endswith("py" ): raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __SCREAMING_SNAKE_CASE = components[:-1] + [test_fn.replace(".py" , "" )] __SCREAMING_SNAKE_CASE = ".".join(lowerCAmelCase_ ) return test_module_path def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_module_path(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = importlib.import_module(lowerCAmelCase_ ) return test_module def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , "all_model_classes" , [] ) if len(lowerCAmelCase_ ) > 0: test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_class() if hasattr(lowerCAmelCase_ , "setUp" ): test.setUp() __SCREAMING_SNAKE_CASE = None if hasattr(lowerCAmelCase_ , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __SCREAMING_SNAKE_CASE = test.model_tester.__class__ return model_tester def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: __SCREAMING_SNAKE_CASE = get_model_tester_from_test_class(lowerCAmelCase_ ) if tester_class is not None: tester_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = {test_class: get_model_tester_from_test_class(lowerCAmelCase_ ) for test_class in test_classes} return test_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_test_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_tester_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_to_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o.__name__ elif isinstance(lowerCAmelCase_ , (list, tuple) ): return [to_json(lowerCAmelCase_ ) for x in o] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return {to_json(lowerCAmelCase_ ): to_json(lowerCAmelCase_ ) for k, v in o.items()} else: return o
682
0
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
22
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
682
0
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
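A minimal, non-interactive check of the restored `hamming` function above; the expected values follow directly from the 2^i * 3^j * 5^k definition:

# The first ten Hamming numbers, verified against the three-pointer merge above.
assert hamming(1) == [1]
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]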
23
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
682
0
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # Normality = molarity (moles / volume) scaled by the equivalence factor.
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # Ideal gas law, P = nRT / V with R = 0.0821 L*atm/(mol*K).
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # Ideal gas law, V = nRT / P.
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # Ideal gas law, T = PV / (nR).
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
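A short usage sketch for the helpers above (the descriptive function names were restored as part of the fix, so treat them as assumptions); the numeric expectations follow from PV = nRT with R = 0.0821 L*atm/(mol*K):

# 1 mol of an ideal gas at 273.15 K occupies ~22.4 L at ~1 atm; every helper
# rounds to the nearest integer, so the checks below are exact.
assert molarity_to_normality(2, 4, 8) == 1                     # (4 / 8) * 2
assert moles_to_pressure(22.4, 1, 273.15) == 1                 # ~1.001 atm
assert moles_to_volume(1, 1, 273.15) == 22                     # ~22.4 L
assert pressure_and_volume_to_temperature(1, 1, 22.4) == 273   # ~272.8 K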
24
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
682
0
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = 10 def __UpperCamelCase ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 3, 4] SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a ) def __UpperCamelCase ( self : str ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a ) def __UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = process_story(a ) self.assertEqual(a , [] ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "" SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = process_story(a ) self.assertEqual(a , [] ) self.assertEqual(a , [] ) def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = process_story(a ) SCREAMING_SNAKE_CASE : Tuple = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(a , a ) SCREAMING_SNAKE_CASE : List[Any] = ["It was the best of times."] self.assertEqual(a , a ) def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = torch.tensor([1, 2, 3, 4] ) SCREAMING_SNAKE_CASE : Any = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(a , 0 ).numpy() , expected.numpy() ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(a , 23 ).numpy() , expected.numpy() ) def __UpperCamelCase ( self : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(a , 1 ).numpy() , expected.numpy() ) def __UpperCamelCase ( self : Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = 101 SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[1, 2, 3, 4, 5, 
6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) SCREAMING_SNAKE_CASE : Tuple = compute_token_type_ids(a , a ) np.testing.assert_array_equal(a , a )
25
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
682
0
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Working precision for Decimal, and the number of Chudnovsky terms needed.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be off because of intermediate rounding.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
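Each Chudnovsky term contributes roughly 14 correct digits, which is why the loop above runs ceil(precision / 14) times; a quick sanity check on the leading digits (a sketch, small precisions exercise only the constant term):

# pi(10) evaluates only the k=0 term, yet already matches the leading digits;
# the final digit is dropped by the [:-1] guard against rounding error.
assert pi(10).startswith("3.14159265")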
26
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
682
0
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
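For intuition, a rough standalone sketch of what a pip-style runtime version check does; this is an approximation, not the actual `require_version` implementation:

# sketch: verify an installed package satisfies a pip-style requirement
from importlib.metadata import version

from packaging.requirements import Requirement


def check_requirement(req_string: str) -> None:
    req = Requirement(req_string)  # e.g. "tqdm>=4.27"
    installed = version(req.name)
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} does not satisfy {req_string}")


check_requirement("packaging>=16.0")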
27
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
682
0
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging UpperCamelCase_ = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self, A = 101 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = length def __len__( self ): '''simple docstring''' return self.length def __getitem__( self, A ): '''simple docstring''' return i class _a : '''simple docstring''' def __call__( self, A ): '''simple docstring''' return {"input_ids": torch.tensor(A ), "labels": torch.tensor(A )} class _a ( nn.Module ): '''simple docstring''' def __init__( self ): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. SCREAMING_SNAKE_CASE : List[str] = nn.Linear(120, 80 ) def UpperCamelCase_ ( self, A, A=None ): '''simple docstring''' if labels is not None: return torch.tensor(0.0, device=input_ids.device ), input_ids else: return input_ids class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @require_torch_neuroncore def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : str = F"--output_dir {output_dir}".split() SCREAMING_SNAKE_CASE : Dict = ['torchrun'] + distributed_args + args execute_subprocess_async(A, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @require_torch_multi_gpu def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE : Optional[int] = F"--output_dir {output_dir}".split() SCREAMING_SNAKE_CASE : Optional[int] = ['torchrun'] + distributed_args + args execute_subprocess_async(A, env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py UpperCamelCase_ = HfArgumentParser((TrainingArguments,)) UpperCamelCase_ = parser.parse_args_into_dataclasses()[0] logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [1_0_1, 4_0, 7]: UpperCamelCase_ = DummyDataset(dataset_length) def lowercase__( __UpperCamelCase: EvalPrediction ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = list(range(len(__UpperCamelCase ) ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} UpperCamelCase_ = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) UpperCamelCase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCamelCase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCamelCase_ = 2 UpperCamelCase_ = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCamelCase_ = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCamelCase_ = None
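The order check above is what distributed evaluation must preserve; a toy, framework-free sketch of one sharding scheme where regathering recovers dataset order (world size and length are illustrative, and this is not Trainer's actual sampler code):

# round-robin sharding: rank r takes indices r, r + world_size, r + 2 * world_size, ...
world_size = 2
samples = list(range(7))
shards = [samples[rank::world_size] for rank in range(world_size)]
# regather by interleaving the shards and truncating to the original length
gathered = [shards[i % world_size][i // world_size] for i in range(len(samples))]
assert gathered == samples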
28
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
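The gradient-checkpointing test near the top of this file asserts that loss and per-parameter gradients match the plain forward pass; a minimal toy of the same invariant (a single `Linear` layer is an illustrative stand-in for the VAE):

# toy check of the invariant tested above: gradient checkpointing recomputes
# activations in the backward pass but must yield identical gradients
import torch
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
layer = torch.nn.Linear(4, 4)
x = torch.randn(2, 4, requires_grad=True)

layer.zero_grad()
layer(x).sum().backward()
plain_grad = layer.weight.grad.clone()

layer.zero_grad()
checkpoint(layer, x).sum().backward()  # newer PyTorch may ask for use_reentrant=...
ckpt_grad = layer.weight.grad.clone()

assert torch.allclose(plain_grad, ckpt_grad, atol=1e-6)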
682
0
"""simple docstring""" from jiwer import compute_measures import datasets A_ = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ A_ = """\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. """ A_ = """ Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. 
Returns: (float): the word error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> wer = datasets.load_metric(\"wer\") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): def UpperCAmelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: lowerCamelCase_ = 0 lowerCamelCase_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
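The docstring's formula, WER = (S + D + I) / N, is the word-level Levenshtein distance divided by the reference length; a minimal single-pair sketch (not jiwer's implementation, just the formula):

def simple_wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # d[i][j] = edit distance between the first i reference words
    # and the first j hypothesis words
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        d[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            deletion = d[i - 1][j] + 1
            insertion = d[i][j - 1] + 1
            d[i][j] = min(substitution, deletion, insertion)
    return d[len(ref)][len(hyp)] / len(ref)


assert simple_wer("this is the reference", "this is the prediction") == 0.25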
29
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ : Tuple = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[Any] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = "left" # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE = tokenizer.eos_token 
__SCREAMING_SNAKE_CASE = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little", "Today, I", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs["input_ids"].to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , ) __SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = "multi_label_classification" __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = 4_2_3_8_4 __SCREAMING_SNAKE_CASE = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( **UpperCAmelCase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
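The batching test above sets `padding_side = "left"` and reuses EOS as PAD; a small illustration of why decoder-only generation pads on the left (the pad id here is an arbitrary stand-in):

# left padding keeps every sequence's real tokens flush against the position
# where generation continues, so the last position is never a pad token
seqs = [[5, 6, 7, 8], [9, 10]]
max_len = max(len(s) for s in seqs)
pad_id = 0
padded = [[pad_id] * (max_len - len(s)) + s for s in seqs]
attention_mask = [[int(t != pad_id) for t in row] for row in padded]
assert padded[1] == [0, 0, 9, 10]
assert attention_mask[1] == [0, 0, 1, 1]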
682
0
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # a color is valid for a vertex if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step: try each color for the current vertex
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
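A quick usage check for the backtracking colorer above (the 4-vertex graph and the 3-color budget are illustrative):

# adjacency matrix: vertex 0 touches 1, 2 and 3; vertices 1 and 2 also touch
graph = [
    [0, 1, 1, 1],
    [1, 0, 1, 0],
    [1, 1, 0, 0],
    [1, 0, 0, 0],
]
coloring = color(graph, 3)
assert len(coloring) == 4
# every edge must join two differently colored vertices
for u, row in enumerate(graph):
    for v, edge in enumerate(row):
        assert not (edge == 1 and coloring[u] == coloring[v])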
30
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
682
0
import math


def decimal_to_octal(num: int) -> str:
    """Convert a positive decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
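Python's built-in `oct()` yields the same `0o`-prefixed string, which gives a convenient cross-check for the function above:

# cross-check decimal_to_octal() against the built-in
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)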
31
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
682
0
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
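# --- Illustrative sketch (added): `patch_environment`, used throughout the
# tests above, in isolation — it temporarily sets environment variables
# (keys are upper-cased) for the duration of the `with` block and restores
# the previous state afterwards.
import os

from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
    assert os.environ["OMP_NUM_THREADS"] == "1"
    assert os.environ["CUDA_VISIBLE_DEVICES"] == "0,1"
assert "OMP_NUM_THREADS" not in os.environ  # assuming it was unset beforehand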
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class snake_case_ : """simple docstring""" A_ = None def UpperCAmelCase__ ( self) -> Optional[Any]: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) UpperCamelCase = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , lowerCamelCase_) def UpperCAmelCase__ ( self) -> int: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , '''feat_extract.json''') feat_extract_first.to_json_file(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase_) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def UpperCAmelCase__ ( self) -> int: UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = feat_extract_first.save_pretrained(lowerCamelCase_)[0] check_json_file_has_correct_format(lowerCamelCase_) UpperCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase_) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict()) def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.feature_extraction_class() self.assertIsNotNone(lowerCamelCase_)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
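# --- Illustrative sketch (added): sanity-check the sieve against naive trial
# division for a small bound. The primes below 10 are 2, 3, 5, 7, so
# solution(10) == 17.
def naive_prime_sum(n: int) -> int:
    def is_prime(k: int) -> bool:
        return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

    return sum(k for k in range(n) if is_prime(k))


assert solution(10) == naive_prime_sum(10) == 17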
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : str = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
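# --- Illustrative sketch (added): the decorator pattern from the script
# above, shown in isolation. `find_executable_batch_size` re-invokes the
# wrapped function with a halved batch size whenever it raises an
# out-of-memory style error. The fake `train` body below only simulates an
# OOM and is not real training code.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 32:  # pretend anything above 32 runs out of memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size


# train() is called with no arguments; the decorator supplies batch_size,
# so this settles at 32 after two simulated failures (128 -> 64 -> 32).
assert train() == 32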
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance in metres between two points on a sphere,
    given their latitudes and longitudes."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
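# --- Illustrative sketch (added): a sample call. The coordinates below are
# rough values for San Francisco and Yosemite, used only to show the
# (lat1, lon1, lat2, lon2) call shape; the result is in metres.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.2f} km")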
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Dict = logging.get_logger(__name__) # General docstring a__ : str = '''RegNetConfig''' # Base docstring a__ : List[str] = '''facebook/regnet-y-040''' a__ : int = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : int = '''facebook/regnet-y-040''' a__ : str = '''tabby, tabby cat''' a__ : Optional[Any] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any: super().__init__(**UpperCAmelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_channels __SCREAMING_SNAKE_CASE = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) ) __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ ) class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) __SCREAMING_SNAKE_CASE = [ tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) for layer_module in self.attention: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = hidden_state * pooled return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __SCREAMING_SNAKE_CASE = [ # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ), *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] # based on `downsample_in_first_stage`, the first layer of the first stage may 
or may not downsample the input self.stages.append( TFRegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention: __SCREAMING_SNAKE_CASE = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) __SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ ) if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) @keras_serializable class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" snake_case__ : Any = RegNetConfig def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" ) __SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) @unpack_inputs def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder_outputs[0] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) # Change to NCHW output format have uniformity in the modules __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : List[Any] = RegNetConfig snake_case__ : List[str] = "regnet" snake_case__ : str = "pixel_values" @property def UpperCAmelCase_ ( self : 
Optional[Any] ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ : Union[str, Any] = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ : Optional[int] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_labels __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) # classification head __SCREAMING_SNAKE_CASE = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] __SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ ) if not return_dict: __SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test, valid for n < 3_317_044_064_679_887_385_961_981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
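# --- Illustrative sketch (added): the deterministic witness set used above
# comfortably covers 64-bit integers, e.g. the Mersenne prime 2**61 - 1.
assert miller_rabin(2**61 - 1)
assert not miller_rabin((2**31 - 1) * (2**19 - 1))  # composite product of two primes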
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = max_length __SCREAMING_SNAKE_CASE = max_position_embeddings @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Optional[int] ) -> bool: __SCREAMING_SNAKE_CASE = input_ids.shape[-1] __SCREAMING_SNAKE_CASE = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = start_length __SCREAMING_SNAKE_CASE = max_new_tokens __SCREAMING_SNAKE_CASE = start_length + max_new_tokens @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[float] = None ) -> Dict: __SCREAMING_SNAKE_CASE = max_time __SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Tuple , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : str ) -> bool: return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Dict , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[str] ) -> bool: return any(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) for criteria in self ) @property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: for stopping_criterium in self: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length return None def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = stopping_criteria.max_length __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , lowerCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) ) return new_stopping_criteria
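A minimal usage sketch for the criteria above, driven by hand with dummy tensors; the shapes are illustrative assumptions, and integration with `model.generate` is omitted:

# Illustrative only: exercise the criteria directly with dummy tensors.
criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=30.0)]
)
dummy_ids = torch.zeros((1, 10), dtype=torch.long)  # already longer than max_length
dummy_scores = torch.zeros((1, 100))                # vocab size is arbitrary here
assert criteria(dummy_ids, dummy_scores)            # max_length of 8 is exceeded
assert criteria.max_length == 8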
682
0
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def UpperCamelCase ( snake_case__ : Tuple="" ) -> str: UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> int: UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) ) def snake_case_ ( self ) -> Any: UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' ) sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 ) UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) ) UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type 
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = 'Hey!' UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
40
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int = RoCBertTokenizer snake_case__ : int = None snake_case__ : Optional[Any] = False snake_case__ : int = True snake_case__ : Any = filter_non_english def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: super().setUp() __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} for i, value in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase_ ( self : str ) -> List[str]: __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase_ ( self : List[Any] ) -> str: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False __SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = ["的", "人", "有"] __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = "你好,你是谁" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
682
0
def kth_permutation(k: int, n: int) -> list[int]:
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
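A quick usage sketch for the reconstructed kth_permutation above:

# The six permutations of range(3), recovered by lexicographic index.
for k in range(6):
    print(k, kth_permutation(k, 3))
# 0 -> [0, 1, 2], 1 -> [0, 2, 1], ..., 5 -> [2, 1, 0]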
41
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "vivit" def __init__( self : Dict , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=[2, 1_6, 1_6] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Any=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu_fast" , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : str=1E-06 , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Any , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_frames __SCREAMING_SNAKE_CASE = tubelet_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias super().__init__(**UpperCAmelCase__ )
682
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 42 class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ) -> Tuple: '''simple docstring''' super().__init__() lowerCamelCase_ = num_attention_heads lowerCamelCase_ = attention_head_dim lowerCamelCase_ = num_attention_heads * attention_head_dim lowerCamelCase_ = in_channels lowerCamelCase_ = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , eps=1E-6 , affine=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 3. Define transformers blocks lowerCamelCase_ = nn.ModuleList( [ BasicTransformerBlock( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , double_self_attention=SCREAMING_SNAKE_CASE_ , norm_elementwise_affine=SCREAMING_SNAKE_CASE_ , ) for d in range(SCREAMING_SNAKE_CASE_ ) ] ) lowerCamelCase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Any: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = hidden_states.shape lowerCamelCase_ = batch_frames // num_frames lowerCamelCase_ = hidden_states lowerCamelCase_ = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) lowerCamelCase_ = self.norm(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.proj_in(SCREAMING_SNAKE_CASE_ ) # 2. Blocks for block in self.transformer_blocks: lowerCamelCase_ = block( SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ , ) # 3. 
Output lowerCamelCase_ = self.proj_out(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = ( hidden_states[None, None, :] .reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) lowerCamelCase_ = hidden_states.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE_ )
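The forward pass above revolves around regrouping a (batch * frames, channels, height, width) tensor by frame; a self-contained sketch of that reshuffle, independent of the diffusers classes:

import torch

batch_size, num_frames, channels, height, width = 2, 4, 8, 16, 16
hidden_states = torch.randn(batch_size * num_frames, channels, height, width)

# (batch*frames, C, H, W) -> (batch, frames, C, H, W) -> (batch, C, frames, H, W)
regrouped = hidden_states.reshape(batch_size, num_frames, channels, height, width)
regrouped = regrouped.permute(0, 2, 1, 3, 4)
assert regrouped.shape == (batch_size, channels, num_frames, height, width)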
42
"""simple docstring""" import numpy as np from transformers import Pipeline def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.max(lowerCAmelCase_ , axis=-1 , keepdims=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_ ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Tuple , **UpperCAmelCase__ : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if "second_text" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["second_text"] return preprocess_kwargs, {}, {} def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None ) -> str: return self.tokenizer(UpperCAmelCase__ , text_pair=UpperCAmelCase__ , return_tensors=self.framework ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: return self.model(**UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = model_outputs.logits[0].numpy() __SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.model.config.idalabel[best_class] __SCREAMING_SNAKE_CASE = probabilities[best_class].item() __SCREAMING_SNAKE_CASE = logits.tolist() return {"label": label, "score": score, "logits": logits}
682
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
43
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"""{test_file} instead.""" ) __SCREAMING_SNAKE_CASE = components[-1] if not test_fn.endswith("py" ): raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __SCREAMING_SNAKE_CASE = components[:-1] + [test_fn.replace(".py" , "" )] __SCREAMING_SNAKE_CASE = ".".join(lowerCAmelCase_ ) return test_module_path def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_module_path(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = importlib.import_module(lowerCAmelCase_ ) return test_module def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , "all_model_classes" , [] ) if len(lowerCAmelCase_ ) > 0: test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_class() if hasattr(lowerCAmelCase_ , "setUp" ): test.setUp() __SCREAMING_SNAKE_CASE = None if hasattr(lowerCAmelCase_ , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __SCREAMING_SNAKE_CASE = test.model_tester.__class__ return model_tester def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: __SCREAMING_SNAKE_CASE = get_model_tester_from_test_class(lowerCAmelCase_ ) if tester_class is not None: tester_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = {test_class: get_model_tester_from_test_class(lowerCAmelCase_ ) for test_class in test_classes} return test_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_test_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_tester_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_to_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o.__name__ elif isinstance(lowerCAmelCase_ , (list, tuple) ): return [to_json(lowerCAmelCase_ ) for x in o] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return {to_json(lowerCAmelCase_ ): to_json(lowerCAmelCase_ ) for k, v in o.items()} else: return o
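A quick usage sketch for the helpers above; the test file path is illustrative and assumes the script runs from the repository root:

# Resolve a model's test module path (pure string work, no import needed).
example = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
print(get_module_path(example))  # tests.models.bert.test_modeling_bert
# Collecting classes requires the repo on sys.path:
# print([cls.__name__ for cls in get_test_classes(example)])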
682
0
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
44
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
682
0
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def A ( lowercase__ : np.ndarray ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ) -> np.ndarray: UpperCamelCase__ :Union[str, Any] = np.nan for i in range(lowercase__ ): UpperCamelCase__ :Optional[Any] = features[:, labels == i] UpperCamelCase__ :Any = data.mean(1 ) # Centralize the data of class i UpperCamelCase__ :Tuple = data - column_reshape(lowercase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowercase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) UpperCamelCase__ :str = np.dot(lowercase__ , centered_data.T ) return covariance_sum / features.shape[1] def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ) -> np.ndarray: UpperCamelCase__ :List[Any] = features.mean(1 ) UpperCamelCase__ :List[Any] = np.nan for i in range(lowercase__ ): UpperCamelCase__ :int = features[:, labels == i] UpperCamelCase__ :Optional[int] = data.shape[1] UpperCamelCase__ :Union[str, Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) UpperCamelCase__ :Optional[int] = device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) return covariance_sum / features.shape[1] def A ( lowercase__ : np.ndarray , lowercase__ : int ) -> np.ndarray: # Check if the features have been loaded if features.any(): UpperCamelCase__ :List[str] = features.mean(1 ) # Center the dataset UpperCamelCase__ :List[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) ) UpperCamelCase__ :List[str] = np.dot(lowercase__ , centered_data.T ) / features.shape[1] UpperCamelCase__ , UpperCamelCase__ :Any = np.linalg.eigh(lowercase__ ) # Take all the columns in the reverse order (-1), and then takes only the first UpperCamelCase__ :int = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space UpperCamelCase__ :Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ ) logging.info("""Principal Component Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=lowercase__ ) logging.error("""Dataset empty""" ) raise AssertionError def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: UpperCamelCase__ , UpperCamelCase__ :int = eigh( covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , ) UpperCamelCase__ :Optional[Any] = eigenvectors[:, ::-1][:, :dimensions] UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = np.linalg.svd(lowercase__ ) UpperCamelCase__ :str = svd_matrix[:, 0:dimensions] UpperCamelCase__ :Optional[Any] = np.dot(filtered_svd_matrix.T , lowercase__ ) logging.info("""Linear Discriminant Analysis computed""" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=lowercase__ ) 
logging.error("""Dataset empty""" ) raise AssertionError def A ( ) -> None: # Create dummy dataset with 2 classes and 3 features UpperCamelCase__ :List[str] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) UpperCamelCase__ :int = np.array([0, 0, 0, 1, 1] ) UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :Optional[int] = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowercase__ ) as error_info: UpperCamelCase__ :str = linear_discriminant_analysis( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if isinstance(lowercase__ , np.ndarray ): raise AssertionError( """Did not raise AssertionError for dimensions > classes""" ) assert error_info.type is AssertionError def A ( ) -> None: UpperCamelCase__ :Union[str, Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowercase__ ) as error_info: UpperCamelCase__ :Optional[int] = principal_component_analysis(lowercase__ , lowercase__ ) if not np.allclose(lowercase__ , lowercase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
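A self-contained sketch of the PCA path above (centering, covariance, eigendecomposition, projection); it mirrors the logic rather than calling the mangled definition directly:

import numpy as np

features = np.array(
    [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0], [3.0, 4.0, 5.0, 6.0, 7.0]]
)
centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # ascending eigenvalues
leading = eigenvectors[:, ::-1][:, :1]                  # top principal direction
projected = leading.T @ centered
assert projected.shape == (1, 5)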
45
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
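The exact-match path in the metric above boils down to the normalization chain; a self-contained sketch that mirrors it (lowercase, strip punctuation, drop articles, collapse whitespace):

import re
import string

def normalize(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

assert normalize("The  Cat!") == "cat"
assert normalize("An apple") == normalize("apple")  # an exact match after normalization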
682
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Any = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase : Optional[int] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } _lowerCAmelCase : Tuple = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } _lowerCAmelCase : int = '''▁''' class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: Dict=False ,__lowerCAmelCase: int="[CLS]" ,__lowerCAmelCase: Optional[Any]="[SEP]" ,__lowerCAmelCase: List[str]="<unk>" ,__lowerCAmelCase: Optional[Any]="[SEP]" ,__lowerCAmelCase: Optional[Any]="<pad>" ,__lowerCAmelCase: Optional[int]="[CLS]" ,__lowerCAmelCase: str="[MASK]" ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' _lowerCamelCase : Tuple = ( AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ,normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token ) _lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCAmelCase ,remove_space=__lowerCAmelCase ,keep_accents=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCAmelCase ,) _lowerCamelCase : Optional[int] = do_lower_case _lowerCamelCase : Optional[int] = remove_space _lowerCamelCase : int = keep_accents _lowerCamelCase : Any = vocab_file _lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def _lowercase ( self: Dict ): '''simple docstring''' return len(self.sp_model ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.__dict__.copy() 
_lowerCamelCase : List[Any] = None return state def __setstate__( self: Any ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): _lowerCamelCase : Tuple = {} _lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self: Any ,__lowerCAmelCase: Tuple ): '''simple docstring''' if self.remove_space: _lowerCamelCase : Dict = " ".join(inputs.strip().split() ) else: _lowerCamelCase : str = inputs _lowerCamelCase : Union[str, Any] = outputs.replace("``" ,"\"" ).replace("''" ,"\"" ) if not self.keep_accents: _lowerCamelCase : List[str] = unicodedata.normalize("NFKD" ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] ) if self.do_lower_case: _lowerCamelCase : Any = outputs.lower() return outputs def _lowercase ( self: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.preprocess_text(__lowerCAmelCase ) _lowerCamelCase : List[str] = self.sp_model.encode(__lowerCAmelCase ,out_type=__lowerCAmelCase ) _lowerCamelCase : Dict = [] for piece in pieces: if len(__lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _lowerCamelCase : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase ,"" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCamelCase : Optional[Any] = cur_pieces[1:] else: _lowerCamelCase : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCAmelCase ) else: new_pieces.append(__lowerCAmelCase ) return new_pieces def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: str ): '''simple docstring''' return self.sp_model.PieceToId(__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' return self.sp_model.IdToPiece(__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : Tuple = [] _lowerCamelCase : Dict = "" _lowerCamelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token _lowerCamelCase : List[Any] = True _lowerCamelCase : List[Any] = [] else: current_sub_tokens.append(__lowerCAmelCase ) _lowerCamelCase : Any = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def _lowercase ( self: Any ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : str = [self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase ,token_ids_a=__lowerCAmelCase ,already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1] def _lowercase ( self: str 
,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : Tuple = [self.sep_token_id] _lowerCamelCase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCamelCase : str = os.path.join( __lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase ,"wb" ) as fi: _lowerCamelCase : Dict = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
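# A minimal usage sketch for the sentencepiece-backed tokenizer above, via the
# public transformers class; assumes the "albert-base-v2" checkpoint can be
# downloaded.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
encoding = tokenizer("Hello world!")
print(encoding["input_ids"])  # ids wrapped as [CLS] ... [SEP]
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))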
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
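# A standalone version of the slow integration check above; assumes network
# access to the "sijunhe/nezha-cn-base" checkpoint used by the test.
import torch
from transformers import NezhaModel

model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # torch.Size([1, 6, 768]), as asserted in the test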
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULES_SUPPORTING_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
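# A small illustrative helper (not part of the original module) showing how
# _EXTENSION_TO_MODULE can be used to pick a builder module from a file name:
import os


def _infer_module_from_path(path: str):
    ext = os.path.splitext(path)[1]
    return _EXTENSION_TO_MODULE.get(ext, (None, {}))


print(_infer_module_from_path("train.jsonl"))  # ('json', {})
print(_infer_module_from_path("data.tsv"))     # ('csv', {'sep': '\t'})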
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : int=1024 , __magic_name__ : Union[str, Any]=1024 , __magic_name__ : Union[str, Any]=3.6 ): """simple docstring""" lowerCAmelCase__ = tokenizer lowerCAmelCase__ = tokenizer.bos_token_id lowerCAmelCase__ = dataset lowerCAmelCase__ = seq_length lowerCAmelCase__ = seq_length * chars_per_token * num_of_sequences def __iter__( self : int ): """simple docstring""" lowerCAmelCase__ = iter(self.dataset ) lowerCAmelCase__ = True while more_examples: lowerCAmelCase__ ,lowerCAmelCase__ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__magic_name__ )["content"] ) buffer_len += len(buffer[-1] ) except StopIteration: lowerCAmelCase__ = False break lowerCAmelCase__ = tokenizer(__magic_name__ , truncation=__magic_name__ )["input_ids"] lowerCAmelCase__ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__magic_name__ ) , self.seq_length ): lowerCAmelCase__ = all_token_ids[i : i + self.seq_length] if len(__magic_name__ ) == self.seq_length: yield torch.tensor(__magic_name__ ) def A ( UpperCamelCase_ : str ) -> int: '''simple docstring''' lowerCAmelCase__ = {"streaming": True} lowerCAmelCase__ = load_dataset(args.dataset_name , split="train" , **UpperCamelCase_ ) lowerCAmelCase__ = ConstantLengthDataset(UpperCamelCase_ , UpperCamelCase_ , seq_length=args.seq_length ) lowerCAmelCase__ = DataLoader(UpperCamelCase_ , batch_size=args.batch_size ) return eval_dataloader def A ( UpperCamelCase_ : Tuple ) -> str: '''simple docstring''' model.eval() lowerCAmelCase__ = [] for step, batch in enumerate(UpperCamelCase_ ): with torch.no_grad(): lowerCAmelCase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ ) lowerCAmelCase__ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(UpperCamelCase_ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowerCAmelCase__ = torch.mean(torch.cat(UpperCamelCase_ ) ) try: lowerCAmelCase__ = torch.exp(UpperCamelCase_ ) except OverflowError: lowerCAmelCase__ = float("inf" ) return loss.item(), perplexity.item() # Setup Accelerator UpperCAmelCase__ : Any = Accelerator() # Parse configuration UpperCAmelCase__ : Tuple = HfArgumentParser(EvaluationArguments) UpperCAmelCase__ : int = parser.parse_args() set_seed(args.seed) # Logging UpperCAmelCase__ : Any = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) # Load model and tokenizer UpperCAmelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader UpperCAmelCase__ : Tuple = create_dataloader(args) # Prepare everything with our `accelerator`. 
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
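# Perplexity is just exp(mean cross-entropy loss); a one-liner illustrating
# the conversion performed in evaluate() above.
import torch

mean_loss = torch.tensor(2.0)
print(torch.exp(mean_loss).item())  # ~7.389: the perplexity for a mean loss of 2.0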
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] ): self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for a, b in zip(_lowercase , _lowercase ): self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase ) def a ( self : List[str] ): __UpperCAmelCase = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_lowercase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a ( self : Tuple ): __UpperCAmelCase = None ops.enable_eager_execution_internal() __UpperCAmelCase = tf.config.list_physical_devices('''CPU''' ) if len(_lowercase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __UpperCAmelCase = tf.config.list_logical_devices(device_type='''CPU''' ) __UpperCAmelCase = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __UpperCAmelCase = GradientAccumulator() __UpperCAmelCase = tf.Variable([4.0, 3.0] ) __UpperCAmelCase , __UpperCAmelCase = create_optimizer(5E-5 , 10 , 5 ) __UpperCAmelCase = tf.Variable([0.0, 0.0] , trainable=_lowercase ) def accumulate_on_replica(_lowercase : Optional[int] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_lowercase : Dict , _lowercase : Tuple ): with strategy.scope(): __UpperCAmelCase = strategy.experimental_local_results(_lowercase ) local_variables[0].assign(_lowercase ) local_variables[1].assign(_lowercase ) strategy.run(_lowercase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_lowercase ) def _check_local_values(_lowercase : Tuple , _lowercase : Optional[Any] ): __UpperCAmelCase = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
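# Typical downstream usage of the pipelines defined above, via the public
# factory (model weights are downloaded on first use):
from transformers import pipeline

summarizer = pipeline("summarization")
print(summarizer("An apple a day keeps the doctor away.", max_length=10, min_length=2))

translator = pipeline("translation_en_to_fr")
print(translator("How old are you?"))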
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def UpperCamelCase_ ( self ): lowerCamelCase__ = self.dummy_uncond_unet lowerCamelCase__ = ScoreSdeVeScheduler() lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_lowerCAmelCase ,return_dict=_lowerCAmelCase )[ 0 ] lowerCamelCase__ = image[0, -3:, -3:, -1] lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = """google/ncsnpp-church-256""" lowerCamelCase__ = UNetaDModel.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVeScheduler.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = ScoreSdeVePipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) sde_ve.to(_lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) lowerCamelCase__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
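# An end-to-end sketch of the slow test above; full-resolution SDE sampling is
# expensive, so num_inference_steps is kept deliberately small here.
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=10, generator=generator).images[0]
image.save("ncsnpp_church.png")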
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
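For reference, a minimal standalone sketch of the decode path these integration tests exercise, assuming network access to the "CompVis/stable-diffusion-v1-4" checkpoint; the random latents merely stand in for the fixture arrays the tests load via load_hf_numpy:

import torch

from diffusers import AutoencoderKL

device = "cuda" if torch.cuda.is_available() else "cpu"
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").to(device).eval()

latents = torch.randn(3, 4, 64, 64, device=device)  # (batch, latent channels, height/8, width/8)
with torch.no_grad():
    image = vae.decode(latents).sample  # -> (3, 3, 512, 512)
print(image.shape)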
682
0
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: """simple docstring""" print('''Loading config file...''' ) def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ): UpperCAmelCase = [] for k, v in d.items(): UpperCAmelCase = parent_key + sep + k if parent_key else k if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() ) else: items.append((new_key, v) ) return dict(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = argparse.Namespace() with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file: try: UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader ) UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ ) for k, v in flat_cfg.items(): setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) ) return config def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase = MobileViTVaConfig() UpperCAmelCase = False # dataset if task_name.startswith('''imagenet1k_''' ): UpperCAmelCase = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): UpperCAmelCase = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: UpperCAmelCase = 384 else: UpperCAmelCase = 256 UpperCAmelCase = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): UpperCAmelCase = 151 UpperCAmelCase = 512 UpperCAmelCase = '''ade20k-id2label.json''' UpperCAmelCase = True elif task_name.startswith('''voc_''' ): UpperCAmelCase = 21 UpperCAmelCase = 512 UpperCAmelCase = '''pascal-voc-id2label.json''' UpperCAmelCase = True # orig_config UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ ) assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , 
'''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: """simple docstring""" UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = val def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int: """simple docstring""" if base_model: UpperCAmelCase = '''''' else: UpperCAmelCase = '''mobilevitv2.''' UpperCAmelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": UpperCAmelCase = k[8:] else: UpperCAmelCase = k if ".block." in k: UpperCAmelCase = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." ) for i in [1, 2]: if f"layer_{i}." in k: UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." ) if ".exp_1x1." in k: UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f"layer_{i}.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." ) if f"layer_{i}.1.local_rep.0." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." ) if f"layer_{i}.1.local_rep.1." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." ) for i in [3, 4, 5]: if i == 3: UpperCAmelCase = [0, 1] elif i == 4: UpperCAmelCase = [0, 1, 2, 3] elif i == 5: UpperCAmelCase = [0, 1, 2] for j in j_in: if f"layer_{i}.1.global_rep.{j}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." ) if f"layer_{i}.1.global_rep.{j+1}." in k: UpperCAmelCase = k_new.replace( f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." ) if f"layer_{i}.1.conv_proj." in k: UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." ) if "pre_norm_attn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." 
in k: UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int: """simple docstring""" UpperCAmelCase = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(SCREAMING_SNAKE_CASE_ ) for k in keys_to_ignore: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __snake_case ( ) -> List[Any]: """simple docstring""" UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load original state_dict UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False else: UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() UpperCAmelCase = False # remove and rename some keys of load the original model UpperCAmelCase = checkpoint remove_unused_keys(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load modified state_dict model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) # verify classification model if task_name.startswith('''imagenet''' ): UpperCAmelCase = outputs.logits UpperCAmelCase = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ) assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"Saving model {task_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) a__ : str = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
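The conversion script above is driven entirely by the (old_key, new_key) pairs produced in create_rename_keys; a toy, self-contained illustration of that pop-and-reinsert flow on a hypothetical two-entry state dict:

import torch

state_dict = {
    "conv_1.block.conv.weight": torch.zeros(16, 3, 3, 3),
    "conv_1.block.norm.weight": torch.zeros(16),
}
rename_pairs = [
    ("conv_1.block.conv.weight", "mobilevitv2.conv_stem.convolution.weight"),
    ("conv_1.block.norm.weight", "mobilevitv2.conv_stem.normalization.weight"),
]
for src, dest in rename_pairs:
    state_dict[dest] = state_dict.pop(src)  # same mechanism as rename_key() above
print(sorted(state_dict))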
51
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ : Tuple = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[Any] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = "left" # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE = tokenizer.eos_token 
__SCREAMING_SNAKE_CASE = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little", "Today, I", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs["input_ids"].to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , ) __SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = "multi_label_classification" __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = 4_2_3_8_4 __SCREAMING_SNAKE_CASE = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( **UpperCAmelCase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
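The batched-generation test above depends on left padding, which decoder-only models need so that new tokens continue directly after each prompt; a hedged standalone sketch of that setup (checkpoint as in the test, network access required):

import torch

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS for padding, as the test does

model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.config.pad_token_id = model.config.eos_token_id

inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
with torch.no_grad():
    output_ids = model.generate(**inputs)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))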
682
0
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __lowercase ( unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self ): __a : Optional[Any] = tf.convert_to_tensor( [ [ 8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0 -0.5_6_2_0_0_4_4, 5.2_3_2_2_9_7_5_2, 4.0_3_8_6_3_9_3, -6.8_7_9_8_3_7_8, -0.5_4_7_8_5_8_0_2, -3.2_0_1_2_1_5_3, 2.9_2_7_7_7_1_7_6, 1.8_8_1_7_1_9_5_3, 7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9 8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10 -9.8_5_7_1_1_8_3_6, -5.9_6_2_0_9_2_3_6, -1.1_3_0_3_9_1_6_1, -7.1_1_1_5_2_9_4, -0.8_3_6_9_6_3_3, -5.3_1_8_6_4_0_8, 7.0_6_4_2_7_4_0_7, 0.8_1_3_6_9_3_4_4, -0.8_2_0_2_3_8_1_7, -5.9_1_7_9_7_9_6, 0.5_8_8_1_3_4_4_3, -6.9_9_7_7_8_4_3_8, 4.7_1_5_5_1_1_8_9, -0.1_8_7_7_1_6_3_7, 7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25 9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26 2.1_2_6_6_2_9_4_1, -9.3_2_5_6_2_0_3_8, 2.3_5_6_5_2_5_2_2, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5_8_4_2_5_5_1_8, 4.5_3_1_3_9_2_3_8, -5.5_7_5_1_0_4_6_4, -6.2_8_0_3_0_6_9_9, -7.1_9_5_2_9_5_0_3, -4.0_2_1_2_2_5_5_1, 1.3_9_3_3_7_0_3_7, -6.0_6_7_0_7_0_5_7, 1.5_9_4_8_0_5_1_7, -9.6_4_3_1_1_9, 0.0_3_9_0_7_7_9_9, 0.6_7_2_3_1_7_6_2, -8.8_8_2_0_6_7_2_6, 6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13 2.2_8_5_2_0_7_2_3, 4.8_2_7_6_7_5_0_6, 4.3_0_4_2_1_3_6_8, 8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17 5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18 -4.4_7_3_5_7_9_4, 7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20 -2.9_1_0_5_1_6_6_3, 2.6_1_9_4_6_0_7_7, -2.5_6_7_4_7_6_2, -9.4_8_9_5_9_3_0_2, -4.0_2_9_2_2_6_4_5, -1.3_5_4_1_6_9_1_8, 9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 
27 -5.8_9_4_7_8_5_5_3, 1.8_5_3_7_0_4_6_7, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __a : Optional[int] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __a : Dict = tf.convert_to_tensor( [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above __a : Union[str, Any] = tf_top_k_top_p_filtering(_UpperCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __a : Tuple = output[output != -float('''inf''' )] __a : Union[str, Any] = tf.cast( tf.where(tf.not_equal(_UpperCAmelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-1_2 ) tf.debugging.assert_equal(_UpperCAmelCase , _UpperCAmelCase ) @require_tf class __lowercase ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' if is_tf_available(): __lowerCAmelCase = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def _lowerCamelCase ( self ): # TF-only test: tf.saved_model export __a : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a : Any = 2 __a : str = 2 class __lowercase ( tf.Module ): '''simple docstring''' def __init__( self , _UpperCAmelCase ): super(_UpperCAmelCase , self ).__init__() __a : List[Any] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=_UpperCAmelCase , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = self.model.generate( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , max_new_tokens=_UpperCAmelCase , return_dict_in_generate=_UpperCAmelCase , ) return {"sequences": outputs["sequences"]} __a : List[str] = [[2, 0], [102, 103]] __a : List[str] = [[1, 0], [1, 1]] __a : Optional[int] = DummyModel(model=_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_UpperCAmelCase , _UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving} ) __a : Union[str, Any] = tf.saved_model.load(_UpperCAmelCase ).signatures['''serving_default'''] for batch_size in range(1 , len(_UpperCAmelCase ) + 1 ): __a : Optional[int] = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } __a : List[str] = serving_func(**_UpperCAmelCase )['''sequences'''] __a : List[Any] = test_model.generate(**_UpperCAmelCase , max_new_tokens=_UpperCAmelCase ) tf.debugging.assert_equal(_UpperCAmelCase , _UpperCAmelCase ) @slow def _lowerCamelCase ( self ): # TF-only test: tf.saved_model export __a : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a : List[Any] = 1 __a : Any = 2 class __lowercase ( tf.Module ): '''simple 
docstring''' def __init__( self , _UpperCAmelCase ): super(_UpperCAmelCase , self ).__init__() __a : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=_UpperCAmelCase , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : str = self.model.generate( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , max_new_tokens=_UpperCAmelCase , return_dict_in_generate=_UpperCAmelCase , ) return {"sequences": outputs["sequences"]} __a : Optional[Any] = [[2], [102, 103]] __a : List[Any] = [[1], [1, 1]] __a : Dict = DummyModel(model=_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_UpperCAmelCase , _UpperCAmelCase , signatures={'''serving_default''': dummy_model.serving} ) __a : List[Any] = tf.saved_model.load(_UpperCAmelCase ).signatures['''serving_default'''] for input_row in range(len(_UpperCAmelCase ) ): __a : Optional[Any] = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } __a : List[str] = serving_func(**_UpperCAmelCase )['''sequences'''] __a : List[Any] = test_model.generate(**_UpperCAmelCase , max_new_tokens=_UpperCAmelCase ) tf.debugging.assert_equal(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_tensorflow_text def _lowerCamelCase ( self ): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_UpperCAmelCase ) class __lowercase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ): super().__init__() __a : List[Any] = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_UpperCAmelCase , '''spiece.model''' ) , '''rb''' ).read() ) __a : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def _lowerCamelCase ( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ): __a : Union[str, Any] = self.tokenizer.tokenize(_UpperCAmelCase ) __a , __a : str = text.pad_model_inputs( _UpperCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __a : int = self.model.generate(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) return self.tokenizer.detokenize(_UpperCAmelCase ) __a : Tuple = CompleteSentenceTransformer() __a : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) __a : Tuple = complete_model(_UpperCAmelCase ) __a : Optional[int] = tf.keras.Model(_UpperCAmelCase , _UpperCAmelCase ) keras_model.save(_UpperCAmelCase ) def _lowerCamelCase ( self ): # Has PT equivalent: this test relies on random sampling __a : int = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } __a : int = 14 __a : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a : Tuple = '''Hello, my dog is cute and''' __a : Any = tokenizer(_UpperCAmelCase , return_tensors='''tf''' ) __a : Dict = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __a : List[Any] = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) __a : int = model.generate(**_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) 
self.assertTrue(expectation == len(generated_tokens[0] ) ) __a : Any = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) __a : Optional[Any] = model.generate(**_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _lowerCamelCase ( self ): # Has PT equivalent: ample use of framework-specific code __a : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) __a : Optional[int] = '''Hugging Face is a technology company based in New York and Paris.''' __a : Union[str, Any] = bart_tokenizer(_UpperCAmelCase , return_tensors='''tf''' ).input_ids __a : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) __a : Dict = bart_model.generate(_UpperCAmelCase ).numpy() class __lowercase ( _UpperCamelCase ): '''simple docstring''' def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return super().call(_UpperCAmelCase , **_UpperCAmelCase ) __a : Any = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) __a : Optional[int] = bart_model.generate(_UpperCAmelCase , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(_UpperCAmelCase , _UpperCAmelCase ) ) class __lowercase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def _lowerCamelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ): return super().call(_UpperCAmelCase , **_UpperCAmelCase ) __a : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __a : Optional[int] = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __a : Tuple = bart_model.generate(_UpperCAmelCase ).numpy() with self.assertRaises(_UpperCAmelCase ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_UpperCAmelCase , foo='''bar''' )
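tf_top_k_top_p_filtering, checked numerically in the first test of this file, masks every logit outside the top-k set and the top-p nucleus; a tiny sketch on toy logits (the helper is imported from transformers exactly as the test does):

import tensorflow as tf

from transformers import tf_top_k_top_p_filtering

logits = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]])
filtered = tf_top_k_top_p_filtering(logits, top_k=2, top_p=0.9, min_tokens_to_keep=1)
print(filtered)  # losing candidates are set to -inf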
52
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
682
0
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch _snake_case : List[str] = logging.get_logger(__name__) @add_end_docstrings( _UpperCamelCase , R""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" def lowercase ( self : List[str] , lowerCAmelCase_ : GenericTensor ) -> np.ndarray: if self.framework == "tf": __lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": __lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_ ) else: raise ValueError('Unsupported framework' ) return masked_index def lowercase ( self : Tuple , lowerCAmelCase_ : GenericTensor ) -> np.ndarray: __lowerCAmelCase = self.get_masked_index(lowerCAmelCase_ ) __lowerCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def lowercase ( self : Optional[int] , lowerCAmelCase_ : GenericTensor ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(lowerCAmelCase_ ) def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : List[Any] ) -> Dict[str, GenericTensor]: if return_tensors is None: __lowerCAmelCase = self.framework __lowerCAmelCase = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) self.ensure_exactly_one_mask_token(lowerCAmelCase_ ) return model_inputs def lowercase ( self : Optional[Any] , lowerCAmelCase_ : str ) -> List[Any]: __lowerCAmelCase = self.model(**lowerCAmelCase_ ) __lowerCAmelCase = model_inputs['input_ids'] return model_outputs def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : int=None ) -> List[Any]: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __lowerCAmelCase = target_ids.shape[0] __lowerCAmelCase = model_outputs['input_ids'][0] __lowerCAmelCase = model_outputs['logits'] if self.framework == "tf": __lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] __lowerCAmelCase = outputs.numpy() __lowerCAmelCase = outputs[0, masked_index, :] __lowerCAmelCase = stable_softmax(lowerCAmelCase_ , axis=-1 ) if target_ids is not None: __lowerCAmelCase = tf.gather_nd(tf.squeeze(lowerCAmelCase_ , 0 ) , target_ids.reshape(-1 , 1 ) ) __lowerCAmelCase = tf.expand_dims(lowerCAmelCase_ , 0 ) __lowerCAmelCase = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) __lowerCAmelCase , __lowerCAmelCase = topk.values.numpy(), topk.indices.numpy() else: __lowerCAmelCase = 
torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample __lowerCAmelCase = outputs[0, masked_index, :] __lowerCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: __lowerCAmelCase = probs[..., target_ids] __lowerCAmelCase , __lowerCAmelCase = probs.topk(lowerCAmelCase_ ) __lowerCAmelCase = [] __lowerCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): __lowerCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place __lowerCAmelCase = input_ids.numpy().copy() if target_ids is not None: __lowerCAmelCase = target_ids[p].tolist() __lowerCAmelCase = p # Filter padding out: __lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __lowerCAmelCase = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) __lowerCAmelCase = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) if single_mask: return result[0] return result def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None ) -> List[str]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowerCAmelCase = [targets] try: __lowerCAmelCase = self.tokenizer.get_vocab() except Exception: __lowerCAmelCase = {} __lowerCAmelCase = [] for target in targets: __lowerCAmelCase = vocab.get(lowerCAmelCase_ , lowerCAmelCase_ ) if id_ is None: __lowerCAmelCase = self.tokenizer( lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , max_length=1 , truncation=lowerCAmelCase_ , )['input_ids'] if len(lowerCAmelCase_ ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ 'We cannot replace it with anything meaningful, ignoring it' ) continue __lowerCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) __lowerCAmelCase = list(set(lowerCAmelCase_ ) ) if len(lowerCAmelCase_ ) == 0: raise ValueError('At least one target must be provided when passed.' ) __lowerCAmelCase = np.array(lowerCAmelCase_ ) return target_ids def lowercase ( self : int , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Any=None ) -> Tuple: __lowerCAmelCase = {} if targets is not None: __lowerCAmelCase = self.get_target_ids(lowerCAmelCase_ , lowerCAmelCase_ ) __lowerCAmelCase = target_ids if top_k is not None: __lowerCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' 
) return {}, {}, postprocess_params def __call__( self : List[Any] , lowerCAmelCase_ : Optional[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: __lowerCAmelCase = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) == 1: return outputs[0] return outputs
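End to end, the class above is normally reached through the pipeline() factory; a short hedged usage sketch (the checkpoint is just an example masked-LM, and the leading space in the target matters for BPE vocabularies):

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
print(unmasker("Paris is the <mask> of France.", top_k=3))
# `targets` restricts scoring to the given tokens, via get_target_ids() above:
print(unmasker("Paris is the <mask> of France.", targets=[" capital"]))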
53
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
682
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): _snake_case =StableDiffusionInstructPixaPixPipeline _snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} _snake_case =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS _snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase__ ( self: Tuple ) -> int: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase_ =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) UpperCAmelCase_ =PNDMScheduler(skip_prk_steps=_lowerCAmelCase ) torch.manual_seed(0 ) UpperCAmelCase_ =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase_ =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase_ =CLIPTextModel(_lowerCAmelCase ) UpperCAmelCase_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ ={ "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Tuple=0 ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ) if str(_lowerCAmelCase ).startswith("mps" ): UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase ) else: UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) UpperCAmelCase_ ={ "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase__ ( self: Optional[int] ) -> str: '''simple docstring''' UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ 
=StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase ) UpperCAmelCase_ ="french fries" UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase ) UpperCAmelCase_ =output.images UpperCAmelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase ) UpperCAmelCase_ =[inputs["prompt"]] * 2 UpperCAmelCase_ =np.array(inputs["image"] ).astype(np.floataa ) / 2_55.0 UpperCAmelCase_ =torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase ) UpperCAmelCase_ =image / 2 + 0.5 UpperCAmelCase_ =image.permute(0 , 3 , 1 , 2 ) UpperCAmelCase_ =image.repeat(2 , 1 , 1 , 1 ) UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) UpperCAmelCase_ =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Union[str, Any] ) -> str: '''simple docstring''' UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ =EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" ) UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase ) UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[0, -3:, -3:, -1] UpperCAmelCase_ =[round(_lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(_lowerCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]: '''simple docstring''' 
super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowerCAmelCase__ ( self: str ) -> Dict: '''simple docstring''' UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase ) UpperCAmelCase_ =VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase ) UpperCAmelCase_ =pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" ) )[0] UpperCAmelCase_ =components["vae"] UpperCAmelCase_ =self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): UpperCAmelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode() UpperCAmelCase_ =pipe(**_lowerCAmelCase )[0] UpperCAmelCase_ =np.abs(out - out_latents_inputs ).max() self.assertLess(_lowerCAmelCase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class A ( unittest.TestCase ): def lowerCAmelCase__ ( self: Dict ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Tuple=0 ) -> Tuple: '''simple docstring''' UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase ) UpperCAmelCase_ =load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) UpperCAmelCase_ ={ "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def lowerCAmelCase__ ( self: Optional[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() UpperCAmelCase_ =self.get_inputs() UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase ) UpperCAmelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() UpperCAmelCase_ =self.get_inputs() UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Dict ) -> str: '''simple docstring''' UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase ) UpperCAmelCase_ =DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() UpperCAmelCase_ 
=self.get_inputs() UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowerCAmelCase__ ( self: Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =0 def callback_fn(_lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor ) -> None: UpperCAmelCase_ =True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCAmelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) UpperCAmelCase_ =latents[0, -3:, -3:, -1] UpperCAmelCase_ =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCAmelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) UpperCAmelCase_ =latents[0, -3:, -3:, -1] UpperCAmelCase_ =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCAmelCase_ =False UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa ) UpperCAmelCase_ =pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() UpperCAmelCase_ =self.get_inputs() pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa ) UpperCAmelCase_ =pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ =self.get_inputs() UpperCAmelCase_ =pipe(**_lowerCAmelCase ) UpperCAmelCase_ =torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowerCAmelCase__ ( self: Dict ) -> str: '''simple docstring''' UpperCAmelCase_ =self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 UpperCAmelCase_ =inputs["image"].resize((504, 504) ) UpperCAmelCase_ ="timbrooks/instruct-pix2pix" UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( _lowerCAmelCase , safety_checker=_lowerCAmelCase , ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) pipe.enable_attention_slicing() UpperCAmelCase_ =pipe(**_lowerCAmelCase ) UpperCAmelCase_ =output.images[0] UpperCAmelCase_ =image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) UpperCAmelCase_ =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
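These tests all use the same slice-based regression check: run the pipeline deterministically, take a small corner of the output array, and compare it against hard-coded reference values within a tolerance. A minimal sketch of that pattern (the random array stands in for a real pipeline output):

import numpy as np

# Deterministic stand-in for a pipeline output of shape (batch, height, width, channels).
image = np.random.RandomState(0).rand(1, 32, 32, 3)

# Take a 3x3 corner of the last channel, as the tests above do.
image_slice = image[0, -3:, -3:, -1]

# In the real tests, expected_slice holds frozen reference values; here we freeze
# the current output to illustrate the comparison.
expected_slice = image_slice.flatten().copy()
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3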
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
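The assertions above rely on the ANY(type) helper imported from test_pipelines_common, which matches any value of a given type. A minimal illustrative reimplementation of that matcher (not the transformers test utility itself):

class ANY:
    def __init__(self, *expected_types):
        self.expected_types = expected_types

    def __eq__(self, other):
        # Equal to anything that is an instance of one of the expected types.
        return isinstance(other, self.expected_types)


assert {"score": 0.93, "label": "archery"} == {"score": ANY(float), "label": ANY(str)}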
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
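A worked example of the rounding that resize() performs above: height and width are floored to the nearest multiple of size_divisor, so every output dimension is divisible by it:

size_divisor = 32
for height, width in [(480, 640), (500, 700)]:
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    print((height, width), "->", (new_h, new_w))
# (480, 640) -> (480, 640)
# (500, 700) -> (480, 672)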
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
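DistilBERT's config uses non-standard field names (dim, n_heads, n_layers); the attribute_map above lets PretrainedConfig translate the standard names transparently. A short sketch of that aliasing (assumes transformers is installed):

from transformers import DistilBertConfig

config = DistilBertConfig()
# attribute_map aliases the standard names onto DistilBERT's own fields.
assert config.hidden_size == config.dim
assert config.num_attention_heads == config.n_heads
assert config.num_hidden_layers == config.n_layers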
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
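A worked example of the key renaming performed above: T5X flax keys of the form layers_<n> become layer.<n>, and encoder keys gain an extra "encoder." prefix:

import re

key = "encoder.layers_3.attention.query.kernel"
key = re.sub(r"layers_(\d+)", r"layer.\1", key)
key = key.replace("encoder", "encoder.encoder")
print(key)  # encoder.encoder.layer.3.attention.query.kernel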
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0_0_0_0 , __UpperCamelCase : int = 1_0 ): '''simple docstring''' snake_case_ : defaultdict = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: snake_case_ : Optional[Any] = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: snake_case_ : List[Any] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : str = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
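The find_executable_batch_size decorator used above retries the wrapped training loop whenever it hits an out-of-memory error, shrinking the batch size each time. A minimal illustrative reimplementation of that retry loop (not accelerate's actual code, which inspects CUDA errors more carefully):

import functools


def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    # The decorated function receives the current batch size first,
                    # mirroring how inner_training_loop(batch_size) is defined above.
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" in str(error).lower():
                        batch_size //= 2  # retry with a smaller batch
                    else:
                        raise
            raise RuntimeError("No executable batch size found; reached batch size 0.")

        return wrapper

    return decorator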
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple 
docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Dict = logging.get_logger(__name__) # General docstring a__ : str = '''RegNetConfig''' # Base docstring a__ : List[str] = '''facebook/regnet-y-040''' a__ : int = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : int = '''facebook/regnet-y-040''' a__ : str = '''tabby, tabby cat''' a__ : Optional[Any] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any: super().__init__(**UpperCAmelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_channels __SCREAMING_SNAKE_CASE = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) ) __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ ) class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) __SCREAMING_SNAKE_CASE = [ tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) for layer_module in self.attention: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = hidden_state * pooled return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __SCREAMING_SNAKE_CASE = [ # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ), *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] # based on `downsample_in_first_stage`, the first layer of the first stage may 
or may not downsample the input self.stages.append( TFRegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention: __SCREAMING_SNAKE_CASE = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) __SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ ) if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) @keras_serializable class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" snake_case__ : Any = RegNetConfig def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" ) __SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) @unpack_inputs def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder_outputs[0] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) # Change to NCHW output format have uniformity in the modules __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : List[Any] = RegNetConfig snake_case__ : List[str] = "regnet" snake_case__ : str = "pixel_values" @property def UpperCAmelCase_ ( self : 
Optional[Any] ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ : Union[str, Any] = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ : Optional[int] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_labels __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) # classification head __SCREAMING_SNAKE_CASE = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] __SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ ) if not return_dict: __SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
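# Usage sketch for the RegNet classification stack defined above. This leans
# on the public `transformers` wrapper (TFRegNetForImageClassification) rather
# than the internal layers; the label count and input shape are illustrative.
import tensorflow as tf

from transformers import RegNetConfig, TFRegNetForImageClassification

config = RegNetConfig(num_labels=10)
model = TFRegNetForImageClassification(config)

# RegNet expects NCHW pixel values; the embedding layer transposes to NHWC.
pixel_values = tf.random.uniform((1, 3, 224, 224))
logits = model(pixel_values).logits
print(logits.shape)  # (1, 10)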
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
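# A minimal end-to-end sketch of the Accelerator exported above; the toy
# model, optimizer, and dataset are illustrative assumptions.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 2)), batch_size=8)

# prepare() wraps each object for the current device / distributed setup.
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for inputs, targets in loader:
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # replaces loss.backward(); handles scaling
    optimizer.step()
    optimizer.zero_grad()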
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=512 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=4 , ) -> Optional[int]: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_attention_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_choices def a ( self : Union[str, Any] ) -> Optional[int]: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_attention_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a ( self : List[str] ) -> Union[str, Any]: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs 
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def a ( self : Optional[Any] ) -> Dict: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = True lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = True snake_case__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def a ( self : int ) -> Dict: lowerCAmelCase__ = FlaxRobertaPreLayerNormModelTester(self ) @slow def a ( self : Tuple ) -> Union[str, Any]: for model_class_name in self.all_model_classes: lowerCAmelCase__ = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_flax class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def a ( self : int ) -> Dict: lowerCAmelCase__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] lowerCAmelCase__ = [1, 11, 50_265] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice. lowerCAmelCase__ = np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) @slow def a ( self : Union[str, Any] ) -> Optional[int]: lowerCAmelCase__ = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] # compare the actual values for a slice. lowerCAmelCase__ = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = max_length __SCREAMING_SNAKE_CASE = max_position_embeddings @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Optional[int] ) -> bool: __SCREAMING_SNAKE_CASE = input_ids.shape[-1] __SCREAMING_SNAKE_CASE = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = start_length __SCREAMING_SNAKE_CASE = max_new_tokens __SCREAMING_SNAKE_CASE = start_length + max_new_tokens @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[float] = None ) -> Dict: __SCREAMING_SNAKE_CASE = max_time __SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Tuple , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : str ) -> bool: return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Dict , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[str] ) -> bool: return any(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) for criteria in self ) @property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: for stopping_criterium in self: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length return None def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = stopping_criteria.max_length __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , lowerCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) ) return new_stopping_criteria
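# Sketch of composing criteria like those above through the public
# `transformers` API; generation stops as soon as any criterion fires.
# The checkpoint is illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)])
inputs = tokenizer("Hello, my name is", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=32, stopping_criteria=criteria)
print(tokenizer.decode(output_ids[0]))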
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging snake_case = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = "https://pypi.org/pypi/diffusers/json" SCREAMING_SNAKE_CASE : List[str] = json.loads(request.urlopen(lowercase ).read() )["releases"].keys() return sorted(lowercase , key=lambda lowercase : version.Version(lowercase ) ) def lowerCamelCase__ ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) SCREAMING_SNAKE_CASE : List[str] = Path(lowercase ) / "__init__.py" if not init_path.exists(): init_path.touch() def lowerCamelCase__ ( lowercase ): """simple docstring""" init_hf_modules() SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowercase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(lowercase , exist_ok=lowercase ) SCREAMING_SNAKE_CASE : Any = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Dict = f.read() # Imports of the form `import .xxx` SCREAMING_SNAKE_CASE : int = re.findall("^\s*import\s+\.(\S+)\s*$" , lowercase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , lowercase , flags=re.MULTILINE ) # Unique-ify return list(set(lowercase ) ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : List[str] = [module_file] SCREAMING_SNAKE_CASE : Dict = [] # Let's recurse through all relative imports while not no_change: SCREAMING_SNAKE_CASE : Optional[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(lowercase ) ) SCREAMING_SNAKE_CASE : str = Path(lowercase ).parent SCREAMING_SNAKE_CASE : int = [str(module_path / m ) for m in new_imports] SCREAMING_SNAKE_CASE : List[str] = [f for f in new_import_files if f not in all_relative_imports] SCREAMING_SNAKE_CASE : Dict = [F'''{f}.py''' for f in new_import_files] SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) == 0 all_relative_imports.extend(lowercase ) return all_relative_imports def lowerCamelCase__ ( lowercase ): """simple docstring""" with open(lowercase , "r" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE : Optional[Any] = f.read() # Imports of the form `import xxx` SCREAMING_SNAKE_CASE : int = re.findall("^\s*import\s+(\S+)\s*$" , lowercase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , lowercase , flags=re.MULTILINE ) # Only keep the top-level module SCREAMING_SNAKE_CASE : List[Any] = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all SCREAMING_SNAKE_CASE : Tuple = list(set(lowercase ) ) SCREAMING_SNAKE_CASE : Any = [] for imp in imports: try: importlib.import_module(lowercase ) except ImportError: missing_packages.append(lowercase ) if len(lowercase ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " F'''{', '.join(lowercase )}. Run `pip install {' '.join(lowercase )}`''' ) return get_relative_imports(lowercase ) def lowerCamelCase__ ( lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = module_path.replace(os.path.sep , "." ) SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module(lowercase ) if class_name is None: return find_pipeline_class(lowercase ) return getattr(lowercase , lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" from ..pipelines import DiffusionPipeline SCREAMING_SNAKE_CASE : Union[str, Any] = dict(inspect.getmembers(lowercase , inspect.isclass ) ) SCREAMING_SNAKE_CASE : Tuple = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , lowercase ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:''' F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in''' F''' {loaded_module}.''' ) SCREAMING_SNAKE_CASE : Optional[int] = cls return pipeline_class def lowerCamelCase__ ( lowercase , lowercase , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = str(lowercase ) SCREAMING_SNAKE_CASE : Dict = os.path.join(lowercase , lowercase ) if os.path.isfile(lowercase ): SCREAMING_SNAKE_CASE : Any = module_file_or_url SCREAMING_SNAKE_CASE : Any = "local" elif pretrained_model_name_or_path.count("/" ) == 0: SCREAMING_SNAKE_CASE : int = get_diffusers_versions() # cut ".dev0" SCREAMING_SNAKE_CASE : int = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: SCREAMING_SNAKE_CASE : str = latest_version if latest_version[1:] in available_versions else "main" logger.info(F'''Defaulting to latest_version: {revision}.''' ) elif revision in available_versions: SCREAMING_SNAKE_CASE : Tuple = F'''v{revision}''' elif revision == "main": SCREAMING_SNAKE_CASE : Optional[Any] = revision else: raise ValueError( F'''`custom_revision`: {revision} does not exist. 
Please make sure to choose one of''' F''' {', '.join(available_versions + ['main'] )}.''' ) # community pipeline on GitHub SCREAMING_SNAKE_CASE : Optional[Any] = COMMUNITY_PIPELINES_URL.format(revision=lowercase , pipeline=lowercase ) try: SCREAMING_SNAKE_CASE : Optional[int] = cached_download( lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , local_files_only=lowercase , use_auth_token=lowercase , ) SCREAMING_SNAKE_CASE : str = "git" SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise else: try: # Load from URL or cache if already cached SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download( lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , local_files_only=lowercase , use_auth_token=lowercase , ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise # Check we have all the requirements in our environment SCREAMING_SNAKE_CASE : Optional[int] = check_imports(lowercase ) # Now we move the module inside our cached dynamic modules. SCREAMING_SNAKE_CASE : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(lowercase ) SCREAMING_SNAKE_CASE : Any = Path(lowercase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(lowercase , submodule_path / module_file ) for module_needed in modules_needed: SCREAMING_SNAKE_CASE : Dict = F'''{module_needed}.py''' shutil.copy(os.path.join(lowercase , lowercase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(lowercase , lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = use_auth_token elif use_auth_token is True: SCREAMING_SNAKE_CASE : List[Any] = HfFolder.get_token() else: SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : Any = model_info(lowercase , revision=lowercase , token=lowercase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
SCREAMING_SNAKE_CASE : str = submodule_path / commit_hash SCREAMING_SNAKE_CASE : Union[str, Any] = full_submodule + os.path.sep + commit_hash create_dynamic_module(lowercase ) if not (submodule_path / module_file).exists(): shutil.copy(lowercase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( lowercase , F'''{module_needed}.py''' , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , ) return os.path.join(lowercase , lowercase ) def lowerCamelCase__ ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , **lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = get_cached_module_file( lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , ) return get_class_in_module(lowercase , final_module.replace(".py" , "" ) )
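# What the loader above enables in practice: fetching a community pipeline by
# name from the diffusers GitHub repo. The model id and pipeline name are
# illustrative; `custom_pipeline` is resolved via get_cached_module_file.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
)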
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int = RoCBertTokenizer snake_case__ : int = None snake_case__ : Optional[Any] = False snake_case__ : int = True snake_case__ : Any = filter_non_english def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: super().setUp() __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} for i, value in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase_ ( self : str ) -> List[str]: __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase_ ( self : List[Any] ) -> str: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False __SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = ["的", "人", "有"] __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = "你好,你是谁" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
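# Inference sketch mirroring the behaviour the tests above exercise; the
# checkpoint name is an assumption.
from transformers import RoCBertTokenizer

tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
encoded = tokenizer("你好,你是谁")
# RoCBert emits shape and pronunciation ids alongside the usual input ids.
print(encoded["input_ids"])
print(encoded["input_shape_ids"])
print(encoded["input_pronunciation_ids"])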
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase__ ( __lowerCamelCase : Tuple ): __UpperCAmelCase : str = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2] __UpperCAmelCase : Any = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : int = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __UpperCAmelCase : Union[str, Any] = [3, 3, 3, 3] __UpperCAmelCase : Union[str, Any] = [5, 5, 5, 5] elif "fl4" in model_name: __UpperCAmelCase : str = [4, 4, 4, 4] __UpperCAmelCase : Optional[Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __UpperCAmelCase : Dict = [3, 3, 3, 3] if "lrf" in model_name: __UpperCAmelCase : Optional[Any] = [3, 3, 3, 3] else: __UpperCAmelCase : Optional[int] = [2, 2, 2, 2] if "tiny" in model_name: __UpperCAmelCase : List[str] = 96 elif "small" in model_name: __UpperCAmelCase : Dict = 96 elif "base" in model_name: __UpperCAmelCase : List[Any] = 128 elif "large" in model_name: __UpperCAmelCase : Any = 192 elif "xlarge" in model_name: __UpperCAmelCase : Tuple = 256 elif "huge" in model_name: __UpperCAmelCase : int = 352 # set label information __UpperCAmelCase : Tuple = """huggingface/label-files""" if "large" in model_name or "huge" in model_name: __UpperCAmelCase : Any = """imagenet-22k-id2label.json""" else: __UpperCAmelCase : Dict = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : Optional[int] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} __UpperCAmelCase : List[str] = FocalNetConfig( embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , ) return config def lowerCamelCase__ ( __lowerCamelCase : Tuple ): if "patch_embed.proj" in name: __UpperCAmelCase : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: __UpperCAmelCase : int = """encoder.""" + name if "encoder.layers" in name: __UpperCAmelCase : Optional[int] = name.replace("""encoder.layers""" , """encoder.stages""" ) if "downsample.proj" in name: __UpperCAmelCase : Optional[Any] = name.replace("""downsample.proj""" , """downsample.projection""" ) if "blocks" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""blocks""" , """layers""" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __UpperCAmelCase : List[str] = name.replace("""modulation.f""" , """modulation.projection_in""" ) if "modulation.h.weight" in name or "modulation.h.bias" in 
name: __UpperCAmelCase : List[Any] = name.replace("""modulation.h""" , """modulation.projection_context""" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __UpperCAmelCase : str = name.replace("""modulation.proj""" , """modulation.projection_out""" ) if name == "norm.weight": __UpperCAmelCase : Optional[Any] = """layernorm.weight""" if name == "norm.bias": __UpperCAmelCase : Dict = """layernorm.bias""" if "head" in name: __UpperCAmelCase : Tuple = name.replace("""head""" , """classifier""" ) else: __UpperCAmelCase : int = """focalnet.""" + name return name def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=False ): # fmt: off __UpperCAmelCase : Dict = { """focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""", """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""", """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""", """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""", """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""", """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""", """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""", """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""", """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""", """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""", } # fmt: on __UpperCAmelCase : int = model_name_to_url[model_name] print("""Checkpoint URL: """ , __lowerCamelCase ) __UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""model"""] # rename keys for key in state_dict.copy().keys(): __UpperCAmelCase : Tuple = state_dict.pop(__lowerCamelCase ) __UpperCAmelCase : Optional[Any] = val __UpperCAmelCase : Optional[Any] = get_focalnet_config(__lowerCamelCase ) __UpperCAmelCase : Any = FocalNetForImageClassification(__lowerCamelCase ) model.eval() # load state dict model.load_state_dict(__lowerCamelCase ) # verify conversion __UpperCAmelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Union[str, Any] = BitImageProcessor( do_resize=__lowerCamelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , ) __UpperCAmelCase : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) __UpperCAmelCase : List[Any] = processor(images=__lowerCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : str = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) __UpperCAmelCase : 
List[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 ) __UpperCAmelCase : Dict = model(**__lowerCamelCase ) __UpperCAmelCase : Any = outputs.logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) print("""First values of logits:""" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": __UpperCAmelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": __UpperCAmelCase : int = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": __UpperCAmelCase : int = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": __UpperCAmelCase : Optional[int] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": __UpperCAmelCase : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(f"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(f"""{model_name}""" ) processor.push_to_hub(f"""{model_name}""" ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) a : Any = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
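# Once converted, the checkpoint works with the standard image-classification
# flow; the hub id assumes the converted models were pushed under their names.
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])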
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "vivit" def __init__( self : Dict , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=[2, 1_6, 1_6] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Any=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu_fast" , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : str=1E-06 , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Any , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_frames __SCREAMING_SNAKE_CASE = tubelet_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias super().__init__(**UpperCAmelCase__ )
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit (0 for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
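# Usage sketch for the helper above.
print(get_highest_set_bit_position(0))   # 0
print(get_highest_set_bit_position(8))   # 4 (0b1000)
print(get_highest_set_bit_position(25))  # 5 (0b11001)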
"""simple docstring""" import numpy as np from transformers import Pipeline def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.max(lowerCAmelCase_ , axis=-1 , keepdims=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_ ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Tuple , **UpperCAmelCase__ : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if "second_text" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["second_text"] return preprocess_kwargs, {}, {} def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None ) -> str: return self.tokenizer(UpperCAmelCase__ , text_pair=UpperCAmelCase__ , return_tensors=self.framework ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: return self.model(**UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = model_outputs.logits[0].numpy() __SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.model.config.idalabel[best_class] __SCREAMING_SNAKE_CASE = probabilities[best_class].item() __SCREAMING_SNAKE_CASE = logits.tolist() return {"label": label, "score": score, "logits": logits}
"""simple docstring""" import os import pytest from attr import dataclass __UpperCAmelCase = 'us-east-1' # defaults region @dataclass class __lowercase : snake_case_ = 42 snake_case_ = """arn:aws:iam::558105141721:role/sagemaker_execution_role""" snake_case_ = { """task_name""": """mnli""", """per_device_train_batch_size""": 1_6, """per_device_eval_batch_size""": 1_6, """do_train""": True, """do_eval""": True, """do_predict""": True, """output_dir""": """/opt/ml/model""", """overwrite_output_dir""": True, """max_steps""": 5_0_0, """save_steps""": 5_5_0_0, } snake_case_ = {**hyperparameters, """max_steps""": 1_0_0_0} @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase ( self : List[str] ): '''simple docstring''' return f"{self.framework}-transfromers-test" @property def __lowercase ( self : str ): '''simple docstring''' return f"./tests/sagemaker/scripts/{self.framework}" @property def __lowercase ( self : Tuple ): '''simple docstring''' if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"""{test_file} instead.""" ) __SCREAMING_SNAKE_CASE = components[-1] if not test_fn.endswith("py" ): raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __SCREAMING_SNAKE_CASE = components[:-1] + [test_fn.replace(".py" , "" )] __SCREAMING_SNAKE_CASE = ".".join(lowerCAmelCase_ ) return test_module_path def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_module_path(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = importlib.import_module(lowerCAmelCase_ ) return test_module def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , "all_model_classes" , [] ) if len(lowerCAmelCase_ ) > 0: test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_class() if hasattr(lowerCAmelCase_ , "setUp" ): test.setUp() __SCREAMING_SNAKE_CASE = None if hasattr(lowerCAmelCase_ , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __SCREAMING_SNAKE_CASE = test.model_tester.__class__ return model_tester def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: __SCREAMING_SNAKE_CASE = get_model_tester_from_test_class(lowerCAmelCase_ ) if tester_class is not None: tester_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = {test_class: get_model_tester_from_test_class(lowerCAmelCase_ ) for test_class in test_classes} return test_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_test_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_tester_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_to_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o.__name__ elif isinstance(lowerCAmelCase_ , (list, tuple) ): return [to_json(lowerCAmelCase_ ) for x in o] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return {to_json(lowerCAmelCase_ ): to_json(lowerCAmelCase_ ) for k, v in o.items()} else: return o
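A usage sketch for the helpers above. It assumes the script is run from the repository root so `tests/` is importable; the module path `utils.get_test_info` is an assumption.

from utils.get_test_info import get_model_to_tester_mapping, to_json  # module path is an assumption

bert_test = "tests/models/bert/test_modeling_bert.py"
print(to_json(get_model_to_tester_mapping(bert_test)))
# expected: a dict mapping model class names to their tester class names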
682
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
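A quick instantiation check of the defaults above (a sketch; note that `attribute_map` routes the generic `hidden_size` name to `n_embd`):

config = TrajectoryTransformerConfig()
assert config.block_size == 249
assert config.hidden_size == config.n_embd == 128  # resolved through attribute_map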
66
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
682
0
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( UpperCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : torch.FloatTensor SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :List[str]=0.999 , snake_case__ :Dict="cosine" , ) -> List[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ :Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ :Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowercase = [] for i in range(snake_case__ ): _lowercase = i / num_diffusion_timesteps _lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class A_ ( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] ,__A : int = 1000 ,__A : str = "fixed_small_log" ,__A : bool = True ,__A : Optional[float] = 1.0 ,__A : str = "epsilon" ,__A : str = "squaredcos_cap_v2" ,) -> int: if beta_schedule != "squaredcos_cap_v2": raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' ) _lowercase = betas_for_alpha_bar(__A ) _lowercase = 1.0 - self.betas _lowercase = torch.cumprod(self.alphas ,dim=0 ) _lowercase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowercase = 1.0 # setable values _lowercase = None _lowercase = torch.from_numpy(np.arange(0 ,__A )[::-1].copy() ) _lowercase = variance_type def __UpperCAmelCase ( self : List[str] ,__A : torch.FloatTensor ,__A : Optional[int] = None ) -> torch.FloatTensor: return sample def __UpperCAmelCase ( self : List[str] ,__A : int ,__A : Union[str, torch.device] = None ) -> List[str]: _lowercase = num_inference_steps _lowercase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowercase = (np.arange(0 ,__A ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowercase = torch.from_numpy(__A ).to(__A ) def __UpperCAmelCase ( self : Dict ,__A : List[Any] ,__A : str=None ,__A : str=None ,__A : List[str]=None ) -> List[str]: if prev_timestep is None: _lowercase = t - 1 _lowercase = self.alphas_cumprod[t] _lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowercase = 1 - alpha_prod_t _lowercase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowercase = self.betas[t] else: _lowercase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowercase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowercase = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowercase = torch.log(torch.clamp(__A ,min=1e-20 ) ) _lowercase = torch.exp(0.5 * variance ) elif variance_type 
== "learned_range": # NOTE difference with DDPM scheduler _lowercase = variance.log() _lowercase = beta.log() _lowercase = (predicted_variance + 1) / 2 _lowercase = frac * max_log + (1 - frac) * min_log return variance def __UpperCAmelCase ( self : Any ,__A : torch.FloatTensor ,__A : int ,__A : torch.FloatTensor ,__A : Optional[int] = None ,__A : Optional[int]=None ,__A : bool = True ,) -> Union[UnCLIPSchedulerOutput, Tuple]: _lowercase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowercase , _lowercase = torch.split(__A ,sample.shape[1] ,dim=1 ) else: _lowercase = None # 1. compute alphas, betas if prev_timestep is None: _lowercase = t - 1 _lowercase = self.alphas_cumprod[t] _lowercase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowercase = 1 - alpha_prod_t _lowercase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowercase = self.betas[t] _lowercase = self.alphas[t] else: _lowercase = 1 - alpha_prod_t / alpha_prod_t_prev _lowercase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowercase = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ' for the UnCLIPScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowercase = torch.clamp( __A ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowercase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowercase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise _lowercase = 0 if t > 0: _lowercase = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__A ,device=model_output.device ) _lowercase = self._get_variance( __A ,predicted_variance=__A ,prev_timestep=__A ,) if self.variance_type == "fixed_small_log": _lowercase = variance elif self.variance_type == "learned_range": _lowercase = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ' for the UnCLIPScheduler.' 
) _lowercase = variance * variance_noise _lowercase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__A ,pred_original_sample=__A ) def __UpperCAmelCase ( self : Tuple ,__A : torch.FloatTensor ,__A : torch.FloatTensor ,__A : torch.IntTensor ,) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples _lowercase = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowercase = timesteps.to(original_samples.device ) _lowercase = alphas_cumprod[timesteps] ** 0.5 _lowercase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowercase = sqrt_alpha_prod.unsqueeze(-1 ) _lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowercase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowercase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
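To make the `learned_range` branch above concrete: the model's extra output channel, a value in [-1, 1], linearly interpolates in log space between the clamped posterior variance (lower bound) and beta_t (upper bound). A minimal standalone sketch; the function and argument names here are illustrative, not part of the scheduler API.

import math

def learned_range_log_variance(predicted: float, posterior_variance: float, beta_t: float) -> float:
    # `predicted` is the network output in [-1, 1].
    min_log = math.log(max(posterior_variance, 1e-20))  # clamped lower bound
    max_log = math.log(beta_t)                          # upper bound
    frac = (predicted + 1.0) / 2.0                      # map [-1, 1] -> [0, 1]
    return frac * max_log + (1.0 - frac) * min_log      # log-variance, as returned by _get_variance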
67
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
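For orientation, the final score assembled above follows the SARI definition of Xu et al. (2016): keep and add operations are scored with F1, deletion with precision only, and each component is averaged over n-gram orders 1 through 4 before the three are averaged:

$$ \mathrm{SARI} = \tfrac{1}{3}\bigl(F_{\mathrm{keep}} + P_{\mathrm{del}} + F_{\mathrm{add}}\bigr) $$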
682
0
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        # Compute the log-mel spectrogram, clamp the dynamic range to 8 dB below the peak,
        # then rescale to roughly [-1, 1].
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
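A minimal usage sketch for the extractor above (assumes `transformers` is installed): two seconds of silence go in, padded 30-second log-mel features come out.

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.zeros(2 * 16000, dtype=np.float32)  # 2 s of silence at 16 kHz
features = feature_extractor(audio, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000): 80 mel bins, 30 s of 10 ms frames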
68
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
682
0
"""
Project Euler Problem 234: https://projecteuler.net/problem=234

For n >= 4, let lps(n) be the largest prime <= sqrt(n) and ups(n) the smallest
prime >= sqrt(n); n is semidivisible if exactly one of lps(n), ups(n) divides n.
Sum the semidivisible numbers not exceeding the limit.
"""
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers up to `limit`."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
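A quick sanity check of the sieve above on a small bound:

assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]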
69
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
682
0
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( "pipelines_utils", "0.22.0", "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", standard_warn=False, stacklevel=3, )
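An illustrative before/after for the migration this shim supports:

# Deprecated path (served by this shim, emits a deprecation warning):
from diffusers.pipeline_utils import DiffusionPipeline
# Preferred path:
from diffusers.pipelines.pipeline_utils import DiffusionPipeline  # noqa: F811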
70
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
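The module swap at the bottom is what makes the laziness work: at import time the package is replaced by a `_LazyModule` that only resolves a submodule when one of the declared names is first touched. A behavioral sketch (assumes torch is installed):

import transformers.models.ctrl as ctrl_module  # cheap: no backend framework imported yet

config = ctrl_module.CTRLConfig()       # first access resolves configuration_ctrl
model = ctrl_module.CTRLModel(config)   # resolves modeling_ctrl, pulling in the torch-backed code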
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
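The three classes above are normally reached through the pipeline() factory; a usage sketch (the factory picks default checkpoints, so exact outputs depend on the resolved models):

from transformers import pipeline

summarizer = pipeline("summarization")
print(summarizer("Long article text ...", max_length=30, min_length=5))

translator = pipeline("translation_en_to_fr")
print(translator("How old are you?", max_length=40))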
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = [
    "ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab",
    "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab",
    "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn",
    "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn",
    "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn",
    "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab",
    "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang",
    "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn",
    "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr",
    "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn",
    "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn",
    "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn",
    "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn",
    "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn",
    "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn",
]
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
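A translation sketch built on the surface defined above (checkpoint taken from the pretrained map; `AutoModelForSeq2SeqLM` is the usual counterpart):

from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))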
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
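The integration tests above exercise the VAE exactly the way user code does; a condensed encode/decode roundtrip sketch (model id taken from the tests; note the 8x spatial compression asserted in the encode test):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)

with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64): spatial dims shrink by 8
    reconstruction = vae.decode(latents).sample       # back to (1, 3, 512, 512)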
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that can no longer reach max_sum: either the partial sum
    # already overshoots, or even taking every remaining number would fall short.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
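Tracing the demo by hand: the only index-ordered subsets of [3, 34, 4, 12, 5, 2] that sum to 9 are [3, 4, 2] and [4, 5], so the script should print:

# Expected demo output (derived by hand from the tree walk above):
# [3, 4, 2] [4, 5]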
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ : Tuple = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[Any] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = "left" # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE = tokenizer.eos_token 
__SCREAMING_SNAKE_CASE = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little", "Today, I", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs["input_ids"].to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , ) __SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = "multi_label_classification" __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = 4_2_3_8_4 __SCREAMING_SNAKE_CASE = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( **UpperCAmelCase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
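Outside the test harness, the same checkpoint is driven like any causal LM; a sketch (generation settings mirror the integration test above):

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=50, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))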
import math


def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Classic sieve on the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve the rest of [sqrt(n)+1, n] one segment at a time, crossing out
    # multiples of the primes found above.
    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
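A quick check on a small input, traced by hand against both the plain and segmented phases of the sieve:

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]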
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'yolos' def __init__( self : Tuple , _A : Any=768 , _A : Optional[int]=12 , _A : Union[str, Any]=12 , _A : List[str]=3_072 , _A : Tuple="gelu" , _A : List[str]=0.0 , _A : Dict=0.0 , _A : Optional[int]=0.0_2 , _A : int=1e-12 , _A : List[Any]=[512, 864] , _A : Tuple=16 , _A : str=3 , _A : Tuple=True , _A : Dict=100 , _A : str=True , _A : Optional[Any]=False , _A : Any=1 , _A : str=5 , _A : Union[str, Any]=2 , _A : Optional[Any]=5 , _A : Optional[Any]=2 , _A : Optional[int]=0.1 , **_A : Any , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : str = image_size UpperCAmelCase__ : Optional[Any] = patch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : Any = qkv_bias UpperCAmelCase__ : Dict = num_detection_tokens UpperCAmelCase__ : Optional[int] = use_mid_position_embeddings UpperCAmelCase__ : Any = auxiliary_loss # Hungarian matcher UpperCAmelCase__ : int = class_cost UpperCAmelCase__ : Union[str, Any] = bbox_cost UpperCAmelCase__ : Any = giou_cost # Loss coefficients UpperCAmelCase__ : Any = bbox_loss_coefficient UpperCAmelCase__ : List[Any] = giou_loss_coefficient UpperCAmelCase__ : List[Any] = eos_coefficient class lowerCamelCase_ ( __a ): lowerCAmelCase__ = version.parse('1.11' ) @property def lowercase_ ( self : str ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase_ ( self : Any ): '''simple docstring''' return 1e-4 @property def lowercase_ ( self : int ): '''simple docstring''' return 12
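# Minimal usage sketch, assuming this file corresponds to transformers'
# YolosConfig: defaults can be overridden per keyword argument at construction.
from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
assert config.model_type == "yolos"
assert config.num_detection_tokens == 100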
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__ ( unittest.TestCase ): lowercase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def a_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : Any = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset") __UpperCAmelCase : List[Any] = VideoClassificationPipeline(model=UpperCamelCase_ , image_processor=UpperCamelCase_ , top_k=2) __UpperCAmelCase : Optional[int] = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any]): """simple docstring""" for example in examples: __UpperCAmelCase : List[str] = video_classifier(UpperCamelCase_) self.assertEqual( UpperCamelCase_ , [ {"score": ANY(UpperCamelCase_), "label": ANY(UpperCamelCase_)}, {"score": ANY(UpperCamelCase_), "label": ANY(UpperCamelCase_)}, ] , ) @require_torch def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : str = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" __UpperCAmelCase : Tuple = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10}) __UpperCAmelCase : str = pipeline( "video-classification" , model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , frame_sampling_rate=4) __UpperCAmelCase : Tuple = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset") __UpperCAmelCase : List[str] = video_classifier(UpperCamelCase_ , top_k=2) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , ) __UpperCAmelCase : Union[str, Any] = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=4) , [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ] , ) @require_tf def a_ ( self : str): """simple docstring""" pass
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
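# Closed-form cross-check for the generators above: every entry of row n must
# equal the binomial coefficient C(n, k). The row builder below uses
# C(n, k) = C(n, k - 1) * (n - k + 1) / k (hypothetical helper name).
import math


def pascal_row_sketch(row_idx: int) -> list[int]:
    row = [1]
    for col in range(1, row_idx + 1):
        row.append(row[-1] * (row_idx - col + 1) // col)
    return row


assert pascal_row_sketch(5) == [1, 5, 10, 10, 5, 1]
assert all(pascal_row_sketch(6)[k] == math.comb(6, k) for k in range(7))
assert generate_pascal_triangle(6)[5] == pascal_row_sketch(5)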
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from math import factorial


def combinations(n, k):
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
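# Cross-check against the standard library (Python 3.8+): math.comb computes
# the same n-choose-k without materializing full factorials, which avoids
# huge intermediates for large n. A minimal sketch, not part of the original.
import math

assert combinations(52, 5) == math.comb(52, 5) == 2_598_960
assert combinations(40, 4) == math.comb(40, 4) == 91_390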
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
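# The key-renaming step above in miniature: drop the leading "target" prefix
# from flattened t5x keys, then apply ordered substring replacements (toy
# mapping and helper name; a sketch only).
RENAME_SKETCH_MAPPING = {"kernel": "weight", "encoder_norm": "layernorm"}


def rename_key_sketch(key: tuple) -> str:
    new_key = ".".join(key[1:]) if key[0] == "target" else ".".join(key)
    for old, new in RENAME_SKETCH_MAPPING.items():
        new_key = new_key.replace(old, new)
    return new_key


assert rename_key_sketch(("target", "encoder_norm", "kernel")) == "layernorm.weight"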
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP __UpperCamelCase : List[Any] = False try: __UpperCamelCase : int = _is_package_available("""google.colab""") except ModuleNotFoundError: pass @input.register class __UpperCamelCase : def __init__( self : Dict , _lowerCAmelCase : str = None , _lowerCAmelCase : list = [] ) -> Tuple: """simple docstring""" __lowercase = 0 __lowercase = choices __lowercase = prompt if sys.platform == "win32": __lowercase = """*""" else: __lowercase = """➔ """ def _a ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : str = "" ) -> str: """simple docstring""" if sys.platform != "win32": writeColor(self.choices[index] , 32 , _lowerCAmelCase ) else: forceWrite(self.choices[index] , _lowerCAmelCase ) def _a ( self : Dict , _lowerCAmelCase : int ) -> Any: """simple docstring""" if index == self.position: forceWrite(F' {self.arrow_char} ' ) self.write_choice(_lowerCAmelCase ) else: forceWrite(F' {self.choices[index]}' ) reset_cursor() def _a ( self : Optional[Any] , _lowerCAmelCase : Direction , _lowerCAmelCase : int = 1 ) -> Dict: """simple docstring""" __lowercase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(_lowerCAmelCase ) move_cursor(_lowerCAmelCase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def _a ( self : Any ) -> Dict: """simple docstring""" self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def _a ( self : Tuple ) -> Dict: """simple docstring""" move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(_lowerCAmelCase )] for number in range(10 )] ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" __lowercase = int(chr(self.current_selection ) ) __lowercase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , _lowerCAmelCase ) else: return else: return def _a ( self : Optional[int] , _lowerCAmelCase : int = 0 ) -> Union[str, Any]: """simple docstring""" if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) __lowercase = default_choice for i in range(len(self.choices ) ): self.print_choice(_lowerCAmelCase ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: __lowercase = int(builtins.input() ) except ValueError: __lowercase = default_choice else: __lowercase = self.handle_input() if choice is not None: reset_cursor() for _ in 
range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(_lowerCAmelCase , """\n""" ) return choice
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : str = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
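# The recovery mechanism above, isolated: accelerate's
# find_executable_batch_size decorator re-invokes the wrapped function with a
# halved batch size whenever it raises an out-of-memory error (sketch body).
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def demo_training_loop(batch_size):
    print(f"trying batch_size={batch_size}")
    # build dataloaders with `batch_size` and run the epoch loop here; on a
    # CUDA OOM the decorator frees memory, halves batch_size, and retries


demo_training_loop()  # called with no arguments; the decorator injects batch_size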
def binomial_coefficient(n, k):
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
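# Sanity checks for the identities above: the Catalan sequence 1, 1, 2, 5, 14,
# 42 counts binary search trees, and multiplying by n! counts binary trees.
assert [catalan_number(i) for i in range(6)] == [1, 1, 2, 5, 14, 42]
assert binary_tree_count(3) == 30  # 5 BST shapes x 3! labelings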
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Dict = logging.get_logger(__name__) # General docstring a__ : str = '''RegNetConfig''' # Base docstring a__ : List[str] = '''facebook/regnet-y-040''' a__ : int = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : int = '''facebook/regnet-y-040''' a__ : str = '''tabby, tabby cat''' a__ : Optional[Any] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any: super().__init__(**UpperCAmelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_channels __SCREAMING_SNAKE_CASE = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) ) __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ ) class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) __SCREAMING_SNAKE_CASE = [ tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) for layer_module in self.attention: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = hidden_state * pooled return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __SCREAMING_SNAKE_CASE = [ # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ), *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] # based on `downsample_in_first_stage`, the first layer of the first stage may 
or may not downsample the input self.stages.append( TFRegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention: __SCREAMING_SNAKE_CASE = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) __SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ ) if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) @keras_serializable class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" snake_case__ : Any = RegNetConfig def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" ) __SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) @unpack_inputs def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder_outputs[0] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) # Change to NCHW output format have uniformity in the modules __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : List[Any] = RegNetConfig snake_case__ : List[str] = "regnet" snake_case__ : str = "pixel_values" @property def UpperCAmelCase_ ( self : 
Optional[Any] ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ : Union[str, Any] = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ : Optional[int] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_labels __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) # classification head __SCREAMING_SNAKE_CASE = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] __SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ ) if not return_dict: __SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
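# Inference sketch for the classification head above (real checkpoint id; the
# random array stands in for an actual image, so the predicted label is
# meaningless; this only demonstrates the call pattern and assumes TF weights
# are published for that repo):
import numpy as np
import tensorflow as tf
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])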
"""simple docstring""" import os import sys import unittest lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowerCamelCase = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") lowerCamelCase = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' UpperCAmelCase_ = get_test_to_tester_mapping(_UpperCAmelCase ) UpperCAmelCase_ = get_test_to_tester_mapping(_UpperCAmelCase ) UpperCAmelCase_ = {"BertModelTest": "BertModelTester"} UpperCAmelCase_ = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = get_model_to_test_mapping(_UpperCAmelCase ) UpperCAmelCase_ = get_model_to_test_mapping(_UpperCAmelCase ) UpperCAmelCase_ = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } UpperCAmelCase_ = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = get_model_to_tester_mapping(_UpperCAmelCase ) UpperCAmelCase_ = get_model_to_tester_mapping(_UpperCAmelCase ) UpperCAmelCase_ = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } UpperCAmelCase_ = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
682
0
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase__ = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase__ = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase__ = { '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase__ = { '''num_train_timesteps''': 40, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } lowerCAmelCase__ = { '''num_train_timesteps''': 201, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } lowerCAmelCase__ = { '''num_train_timesteps''': 151, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } def snake_case_ ( A_ : List[str] ): '''simple docstring''' if isinstance(A_, A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def snake_case_ ( A_ : Union[str, Any], A_ : Optional[Any], A_ : Tuple, A_ : Dict, A_ : Optional[Any]=False ): '''simple docstring''' _lowerCamelCase : Dict = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _lowerCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _lowerCamelCase : List[str] = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _lowerCamelCase : Any = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _lowerCamelCase : int = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _lowerCamelCase : List[Any] = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _lowerCamelCase : List[str] = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _lowerCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _lowerCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _lowerCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: 
_lowerCamelCase : Any = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _lowerCamelCase : str = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def snake_case_ ( A_ : Optional[int], A_ : Tuple, A_ : Union[str, Any], A_ : Tuple, A_ : str=None ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 ) _lowerCamelCase : Tuple = checkpoint[F'''{old_prefix}.norm.weight'''] _lowerCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.norm.bias'''] _lowerCamelCase : Tuple = weight_q.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : Optional[int] = bias_q.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : int = weight_k.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : int = bias_k.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : str = weight_v.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : int = bias_v.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase : Optional[Any] = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _lowerCamelCase : List[Any] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def snake_case_ ( A_ : str, A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = torch.load(A_, map_location='''cpu''' ) _lowerCamelCase : List[str] = {} _lowerCamelCase : str = checkpoint['''time_embed.0.weight'''] _lowerCamelCase : Tuple = checkpoint['''time_embed.0.bias'''] _lowerCamelCase : str = checkpoint['''time_embed.2.weight'''] _lowerCamelCase : List[str] = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: _lowerCamelCase : List[str] = checkpoint['''label_emb.weight'''] _lowerCamelCase : Any = checkpoint['''input_blocks.0.0.weight'''] _lowerCamelCase : Dict = checkpoint['''input_blocks.0.0.bias'''] _lowerCamelCase : str = unet_config['''down_block_types'''] _lowerCamelCase : int = unet_config['''layers_per_block'''] _lowerCamelCase : Any = unet_config['''attention_head_dim'''] _lowerCamelCase : Optional[int] = unet_config['''block_out_channels'''] _lowerCamelCase : int = 1 _lowerCamelCase : List[Any] = channels_list[0] for i, layer_type in enumerate(A_ ): _lowerCamelCase : Tuple = channels_list[i] _lowerCamelCase : int = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(A_ ): _lowerCamelCase : Union[str, Any] = F'''down_blocks.{i}.resnets.{j}''' _lowerCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0''' _lowerCamelCase : int = True if j == 0 and downsample_block_has_skip else False _lowerCamelCase : Optional[Any] = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(A_ ): _lowerCamelCase : Optional[Any] = F'''down_blocks.{i}.resnets.{j}''' _lowerCamelCase : int = F'''input_blocks.{current_layer}.0''' _lowerCamelCase : List[str] = True if j == 0 and downsample_block_has_skip else False _lowerCamelCase : Union[str, Any] = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) _lowerCamelCase : Tuple = F'''down_blocks.{i}.attentions.{j}''' _lowerCamelCase : Any = F'''input_blocks.{current_layer}.1''' _lowerCamelCase : Optional[Any] = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: _lowerCamelCase : Dict = F'''down_blocks.{i}.downsamplers.0''' _lowerCamelCase : Any = F'''input_blocks.{current_layer}.0''' _lowerCamelCase : 
List[Any] = convert_resnet(A_, A_, A_, A_ ) current_layer += 1 _lowerCamelCase : Tuple = current_channels # hardcoded the mid-block for now _lowerCamelCase : Union[str, Any] = '''mid_block.resnets.0''' _lowerCamelCase : Dict = '''middle_block.0''' _lowerCamelCase : Optional[Any] = convert_resnet(A_, A_, A_, A_ ) _lowerCamelCase : List[str] = '''mid_block.attentions.0''' _lowerCamelCase : Dict = '''middle_block.1''' _lowerCamelCase : Union[str, Any] = convert_attention(A_, A_, A_, A_, A_ ) _lowerCamelCase : Tuple = '''mid_block.resnets.1''' _lowerCamelCase : Optional[int] = '''middle_block.2''' _lowerCamelCase : Any = convert_resnet(A_, A_, A_, A_ ) _lowerCamelCase : str = 0 _lowerCamelCase : Any = unet_config['''up_block_types'''] for i, layer_type in enumerate(A_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _lowerCamelCase : str = F'''up_blocks.{i}.resnets.{j}''' _lowerCamelCase : List[str] = F'''output_blocks.{current_layer}.0''' _lowerCamelCase : Union[str, Any] = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 if i != len(A_ ) - 1: _lowerCamelCase : Optional[Any] = F'''up_blocks.{i}.upsamplers.0''' _lowerCamelCase : int = F'''output_blocks.{current_layer-1}.1''' _lowerCamelCase : str = convert_resnet(A_, A_, A_, A_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _lowerCamelCase : Any = F'''up_blocks.{i}.resnets.{j}''' _lowerCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0''' _lowerCamelCase : Optional[Any] = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) _lowerCamelCase : str = F'''up_blocks.{i}.attentions.{j}''' _lowerCamelCase : Tuple = F'''output_blocks.{current_layer}.1''' _lowerCamelCase : int = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: _lowerCamelCase : Any = F'''up_blocks.{i}.upsamplers.0''' _lowerCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2''' _lowerCamelCase : List[str] = convert_resnet(A_, A_, A_, A_ ) _lowerCamelCase : str = checkpoint['''out.0.weight'''] _lowerCamelCase : Any = checkpoint['''out.0.bias'''] _lowerCamelCase : int = checkpoint['''out.2.weight'''] _lowerCamelCase : Optional[Any] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = strabool(args.class_cond) lowerCAmelCase__ = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase__ = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: lowerCAmelCase__ = None lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase__ = CD_SCHEDULER_CONFIG elif 
"ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
83
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = max_length __SCREAMING_SNAKE_CASE = max_position_embeddings @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Optional[int] ) -> bool: __SCREAMING_SNAKE_CASE = input_ids.shape[-1] __SCREAMING_SNAKE_CASE = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = start_length __SCREAMING_SNAKE_CASE = max_new_tokens __SCREAMING_SNAKE_CASE = start_length + max_new_tokens @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[float] = None ) -> Dict: __SCREAMING_SNAKE_CASE = max_time __SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Tuple , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : str ) -> bool: return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Dict , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[str] ) -> bool: return any(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) for criteria in self ) @property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: for stopping_criterium in self: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length return None def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = stopping_criteria.max_length __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , lowerCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) ) return new_stopping_criteria
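Assuming the renamed classes above correspond to transformers' StoppingCriteriaList, MaxLengthCriteria, and MaxTimeCriteria (which the docstrings and warning messages strongly suggest), a typical use with generate() is:

from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my name is", return_tensors="pt")
# Stop at 20 tokens total, or after 5 seconds, whichever comes first.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
outputs = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(outputs[0]))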
682
0
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
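A small driver for the three variants above. The Node class is an assumption: the functions only rely on .val/.next duck typing, and no node type is defined in this file. Note that is_palindrome mutates its input, so each call gets a fresh list:

class Node:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for v in values:
        node = Node(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


print(is_palindrome(build_list([1, 2, 3, 2, 1])))     # True
print(is_palindrome_stack(build_list([1, 2, 2, 1])))  # True
print(is_palindrome_dict(build_list([1, 2, 3])))      # False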
84
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int = RoCBertTokenizer snake_case__ : int = None snake_case__ : Optional[Any] = False snake_case__ : int = True snake_case__ : Any = filter_non_english def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: super().setUp() __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} for i, value in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase_ ( self : str ) -> List[str]: __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase_ ( self : List[Any] ) -> str: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False __SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = ["的", "人", "有"] __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = "你好,你是谁" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
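Outside the test harness, the slow tokenizer can be loaded from the Hub; the checkpoint name below is the published RoCBert checkpoint as I recall it, not something stated in this file:

from transformers import RoCBertTokenizer

tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
enc = tokenizer("你好,你是谁", return_tensors="pt")
print(enc["input_ids"])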
682
0
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class 
snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 
self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
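The integration test above boils down to the following standalone inference sketch; the checkpoint name assumes the first entry of TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST is facebook/regnet-y-040, which this file does not spell out:

from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(logits.shape)  # (1, 1000)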
85
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "vivit" def __init__( self : Dict , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=[2, 1_6, 1_6] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Any=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu_fast" , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : str=1E-06 , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Any , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_frames __SCREAMING_SNAKE_CASE = tubelet_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias super().__init__(**UpperCAmelCase__ )
682
0
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) over a frequency array; tree[0] stores the first element directly."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]):
        # Build the tree in O(n) by pushing each partial sum to its parent.
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        # Invert the O(n) construction to recover the underlying array.
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int):
        return index + (index & (-index))

    @staticmethod
    def prev(index: int):
        return index - (index & (-index))

    def add(self, index: int, value: int):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int):
        self.add(index, value - self.get(index))

    def prefix(self, right: int):
        # Sum of the first `right` elements (exclusive right bound).
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int):
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int):
        return self.query(index, index + 1)

    def rank_query(self, value: int):
        # Largest index i such that prefix(i) <= value, or -1 if value < tree[0].
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
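A short usage sketch of the Fenwick tree (the class name follows the cleaned-up version above; the original dump used a placeholder name):

f = FenwickTree(arr=[1, 2, 3, 4, 5])
print(f.prefix(3))    # 6  -> 1 + 2 + 3
print(f.query(1, 4))  # 9  -> 2 + 3 + 4
f.add(2, 10)          # array becomes [1, 2, 13, 4, 5]
print(f.get(2))       # 13
print(f.get_array())  # [1, 2, 13, 4, 5]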
86
"""simple docstring""" import numpy as np from transformers import Pipeline def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.max(lowerCAmelCase_ , axis=-1 , keepdims=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_ ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Tuple , **UpperCAmelCase__ : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if "second_text" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["second_text"] return preprocess_kwargs, {}, {} def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None ) -> str: return self.tokenizer(UpperCAmelCase__ , text_pair=UpperCAmelCase__ , return_tensors=self.framework ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: return self.model(**UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = model_outputs.logits[0].numpy() __SCREAMING_SNAKE_CASE = softmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.model.config.idalabel[best_class] __SCREAMING_SNAKE_CASE = probabilities[best_class].item() __SCREAMING_SNAKE_CASE = logits.tolist() return {"label": label, "score": score, "logits": logits}
682
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"""{test_file} instead.""" ) __SCREAMING_SNAKE_CASE = components[-1] if not test_fn.endswith("py" ): raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith("test_modeling_" ): raise ValueError( f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __SCREAMING_SNAKE_CASE = components[:-1] + [test_fn.replace(".py" , "" )] __SCREAMING_SNAKE_CASE = ".".join(lowerCAmelCase_ ) return test_module_path def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_module_path(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = importlib.import_module(lowerCAmelCase_ ) return test_module def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(lowerCAmelCase_ ) for attr in dir(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_ , "all_model_classes" , [] ) if len(lowerCAmelCase_ ) > 0: test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = test_class() if hasattr(lowerCAmelCase_ , "setUp" ): test.setUp() __SCREAMING_SNAKE_CASE = None if hasattr(lowerCAmelCase_ , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __SCREAMING_SNAKE_CASE = test.model_tester.__class__ return model_tester def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: __SCREAMING_SNAKE_CASE = get_model_tester_from_test_class(lowerCAmelCase_ ) if tester_class is not None: tester_classes.append(lowerCAmelCase_ ) # sort with class names return sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x.__name__ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_test_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = {test_class: get_model_tester_from_test_class(lowerCAmelCase_ ) for test_class in test_classes} return test_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_test_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_test_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_model_classes(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = { model_class: get_tester_classes_for_model(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in model_classes } return model_to_tester_mapping def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return o.__name__ elif isinstance(lowerCAmelCase_ , (list, tuple) ): return [to_json(lowerCAmelCase_ ) for x in o] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return {to_json(lowerCAmelCase_ ): to_json(lowerCAmelCase_ ) for k, v in o.items()} else: return o
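Given these helpers (the real function names — get_test_to_tester_mapping, to_json, etc. — are visible in the expected test file earlier in this dump, which imports them from utils/get_test_info.py), a typical call from the repo root would be:

import os

from get_test_info import get_test_to_tester_mapping, to_json

bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
print(to_json(get_test_to_tester_mapping(bert_test_file)))
# {'BertModelTest': 'BertModelTester'}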
682
0
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file UpperCAmelCase = """Run commands across TPU VMs for initial setup before running `accelerate launch`.""" def _snake_case ( __snake_case : Tuple=None ): """simple docstring""" if subparsers is not None: _lowerCamelCase : str = subparsers.add_parser("""tpu-config""" , description=_description ) else: _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments _lowerCamelCase : Optional[int] = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=__snake_case , default=__snake_case , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=__snake_case , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=__snake_case , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) _lowerCamelCase : List[Any] = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=__snake_case , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=__snake_case ) return parser def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : str = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(__snake_case ): _lowerCamelCase : Optional[int] = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: _lowerCamelCase : Dict = defaults.command_file if not args.command and defaults.commands is not None: _lowerCamelCase : List[Any] = defaults.commands if not args.tpu_name: _lowerCamelCase : Optional[Any] = defaults.tpu_name if not args.tpu_zone: _lowerCamelCase : Optional[Any] = defaults.tpu_zone if args.accelerate_version == "dev": _lowerCamelCase : Optional[int] = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": _lowerCamelCase : Dict = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , __snake_case ): _lowerCamelCase : Any = F'accelerate=={args.accelerate_version}' if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: _lowerCamelCase : Dict = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __snake_case ): _lowerCamelCase : Tuple = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate _lowerCamelCase : Optional[int] = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [F'pip install {args.accelerate_version}'] new_cmd += args.command _lowerCamelCase : List[str] = """; """.join(__snake_case ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess _lowerCamelCase : Dict = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F'Running {" ".join(__snake_case )}' ) return subprocess.run(__snake_case ) print("""Successfully setup pod.""" ) def _snake_case ( ): """simple docstring""" _lowerCamelCase : Optional[int] = tpu_command_parser() _lowerCamelCase : Optional[Any] = parser.parse_args() tpu_command_launcher(__snake_case )
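Putting the flags above together, a typical invocation looks like the following; the TPU name, zone, and commands are placeholders:

accelerate tpu-config \
    --tpu_name my-tpu --tpu_zone us-central1-a \
    --install_accelerate \
    --command "pip install -r requirements.txt" \
    --command "accelerate launch train.py" \
    --debug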
88
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
682
0
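Editor's note: both records above come from the `accelerate` CLI, and the TPU launcher's core pattern is flattening a list of shell commands into a single `--command` string for `gcloud compute tpus tpu-vm ssh`. The sketch below is a minimal, self-contained illustration of that pattern only; the function name, TPU name, and zone are hypothetical, and this is not the actual `accelerate tpu-config` implementation.

import subprocess


def run_on_pod(commands, tpu_name, tpu_zone, accelerate_version="accelerate -U", debug=False):
    # Default to the shared folder and install accelerate before the user commands.
    new_cmd = ["cd /usr/share", f"pip install {accelerate_version}"] + commands
    # gcloud takes a single shell string per worker, so the commands are joined with "; ".
    joined = "; ".join(new_cmd)
    cmd = [
        "gcloud", "compute", "tpus", "tpu-vm", "ssh", tpu_name,
        "--zone", tpu_zone, "--command", joined, "--worker", "all",
    ]
    if debug:
        print(f"Running {' '.join(cmd)}")
        return None
    return subprocess.run(cmd)


# Dry run with hypothetical values (prints the gcloud invocation, runs nothing):
# run_on_pod(["python train.py"], "my-tpu", "us-central1-b", debug=True)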
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE : Optional[Any] = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : str = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
SCREAMING_SNAKE_CASE : Any = spec.loader.load_module()

SCREAMING_SNAKE_CASE : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (written as a raw string so the backslash escapes reach the regex engine intact)
SCREAMING_SNAKE_CASE : str = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

SCREAMING_SNAKE_CASE : Dict = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def UpperCamelCase_() -> List[Any]:
    _lowercase : Optional[Any] = []

    for config_class in list(CONFIG_MAPPING.values()):
        _lowercase : Any = False

        # source code of `config_class`
        _lowercase : str = inspect.getsource(lowerCamelCase_)
        _lowercase : Optional[Any] = _re_checkpoint.findall(lowerCamelCase_)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            _lowercase , _lowercase : List[str] = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            _lowercase : Union[str, Any] = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                _lowercase : List[Any] = True
                break

        _lowercase : List[Any] = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(lowerCamelCase_)

    if len(lowerCamelCase_) > 0:
        _lowercase : Union[str, Any] = '\n'.join(sorted(lowerCamelCase_))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
89
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
682
0
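Editor's note: the `check_config_docstrings` record above boils down to one regex check: every config class docstring must contain a `[checkpoint](https://huggingface.co/checkpoint)` link whose text and URL agree. A minimal standalone sketch of that check, with a made-up docstring for illustration:

import re

# Same pattern as in the record, written as a raw string.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


def has_valid_checkpoint(source: str) -> bool:
    # findall yields (checkpoint name, checkpoint link) pairs; at least one must agree.
    for ckpt_name, ckpt_link in _re_checkpoint.findall(source):
        if ckpt_link == f"https://huggingface.co/{ckpt_name}":
            return True
    return False


assert has_valid_checkpoint("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
assert not has_valid_checkpoint("[a](https://huggingface.co/b)")  # name/link mismatch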
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A ) -> str: lowerCAmelCase__ = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowerCAmelCase__ = [144, 192, 240] lowerCAmelCase__ = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowerCAmelCase__ = [96, 120, 144] lowerCAmelCase__ = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowerCAmelCase__ = [64, 80, 96] lowerCAmelCase__ = [16, 16, 24, 48, 64, 80, 320] lowerCAmelCase__ = 0.05 lowerCAmelCase__ = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): lowerCAmelCase__ = 512 lowerCAmelCase__ = 16 lowerCAmelCase__ = 21 lowerCAmelCase__ = '''pascal-voc-id2label.json''' else: lowerCAmelCase__ = 1000 lowerCAmelCase__ = '''imagenet-1k-id2label.json''' lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} return config def _snake_case ( A , A=False ) -> str: for i in range(1 , 6 ): if F"""layer_{i}.""" in name: lowerCAmelCase__ = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: lowerCAmelCase__ = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: lowerCAmelCase__ = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: lowerCAmelCase__ = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: lowerCAmelCase__ = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: lowerCAmelCase__ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: lowerCAmelCase__ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: lowerCAmelCase__ = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: lowerCAmelCase__ = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." in name: lowerCAmelCase__ = name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowerCAmelCase__ = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: lowerCAmelCase__ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: lowerCAmelCase__ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: lowerCAmelCase__ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: lowerCAmelCase__ = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' ) if F""".global_rep.{i}.bias""" in name: lowerCAmelCase__ = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' ) if ".global_rep." 
in name: lowerCAmelCase__ = name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: lowerCAmelCase__ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: lowerCAmelCase__ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: lowerCAmelCase__ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: lowerCAmelCase__ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." in name: lowerCAmelCase__ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: lowerCAmelCase__ = name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: lowerCAmelCase__ = name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: lowerCAmelCase__ = name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: lowerCAmelCase__ = name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." in name: lowerCAmelCase__ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: lowerCAmelCase__ = name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." not in name): lowerCAmelCase__ = '''mobilevit.''' + name return name def _snake_case ( A , A , A=False ) -> Tuple: if base_model: lowerCAmelCase__ = '''''' else: lowerCAmelCase__ = '''mobilevit.''' for key in orig_state_dict.copy().keys(): lowerCAmelCase__ = orig_state_dict.pop(A ) if key[:8] == "encoder.": lowerCAmelCase__ = key[8:] if "qkv" in key: lowerCAmelCase__ = key.split('''.''' ) lowerCAmelCase__ = int(key_split[0][6:] ) - 1 lowerCAmelCase__ = int(key_split[3] ) lowerCAmelCase__ = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) lowerCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowerCAmelCase__ = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: lowerCAmelCase__ = val[:dim, :] lowerCAmelCase__ = val[dim : dim * 2, :] lowerCAmelCase__ = val[-dim:, :] else: lowerCAmelCase__ = val[:dim] lowerCAmelCase__ = val[dim : dim * 2] lowerCAmelCase__ = val[-dim:] else: lowerCAmelCase__ = val return orig_state_dict def _snake_case ( ) -> Dict: lowerCAmelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase__ = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def _snake_case ( A , A , A , A=False ) -> int: lowerCAmelCase__ = get_mobilevit_config(A ) # load original state_dict lowerCAmelCase__ = torch.load(A , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): lowerCAmelCase__ = MobileViTForSemanticSegmentation(A ).eval() else: lowerCAmelCase__ = MobileViTForImageClassification(A ).eval() lowerCAmelCase__ = convert_state_dict(A , A ) model.load_state_dict(A ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCAmelCase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase__ = model(**A ) lowerCAmelCase__ = outputs.logits if mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == 
"deeplabv3_mobilevit_s": lowerCAmelCase__ = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowerCAmelCase__ = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowerCAmelCase__ = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowerCAmelCase__ = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": lowerCAmelCase__ = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": lowerCAmelCase__ = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , A , atol=1E-4 ) Path(A ).mkdir(exist_ok=A ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A ) if push_to_hub: lowerCAmelCase__ = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) lowerCAmelCase__ = model_mapping[mobilevit_name] image_processor.push_to_hub(A , organization='''apple''' ) model.push_to_hub(A , organization='''apple''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __UpperCAmelCase = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
90
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
682
0
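Editor's note: the MobileViT conversion record above is, at heart, a long chain of `str.replace` rules that map original checkpoint keys onto 🤗 module paths, applied over a copy of the state dict. Below is a table-driven sketch of that pattern; the rename rules here are invented for illustration (the converter's real rules are in the record itself):

from collections import OrderedDict

# Hypothetical rename rules, applied in order to every key.
RENAME_RULES = [
    ("conv_1.", "conv_stem."),
    (".norm.", ".normalization."),
    (".conv.", ".convolution."),
]


def rename_state_dict(state_dict):
    out = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in RENAME_RULES:
            new_key = new_key.replace(old, new)
        out[new_key] = value
    return out


renamed = rename_state_dict({"conv_1.weight": 1, "block.norm.bias": 2})
# keys become "conv_stem.weight" and "block.normalization.bias"
assert list(renamed) == ["conv_stem.weight", "block.normalization.bias"]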
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
682
0
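Editor's note: the Project Euler record above scores each name as (alphabetical value of the name) × (1-based position in the sorted list). The same computation in a compact, readable form, exercised on a tiny inline list instead of `p022_names.txt`:

def alphabetical_value(name):
    # 'A' contributes 1, 'B' contributes 2, ...; ord('A') == 65.
    return sum(ord(letter) - 64 for letter in name)


def total_name_score(names):
    return sum((i + 1) * alphabetical_value(name) for i, name in enumerate(sorted(names)))


# COLIN = 3 + 15 + 12 + 9 + 14 = 53; sorted after ANNA (value 30), its score is 2 * 53.
assert alphabetical_value("COLIN") == 53
assert total_name_score(["COLIN", "ANNA"]) == 1 * 30 + 2 * 53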
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["""MBartTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["""MBartTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MBartForCausalLM""",
        """MBartForConditionalGeneration""",
        """MBartForQuestionAnswering""",
        """MBartForSequenceClassification""",
        """MBartModel""",
        """MBartPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        """TFMBartForConditionalGeneration""",
        """TFMBartModel""",
        """TFMBartPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        """FlaxMBartForConditionalGeneration""",
        """FlaxMBartForQuestionAnswering""",
        """FlaxMBartForSequenceClassification""",
        """FlaxMBartModel""",
        """FlaxMBartPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
682
0
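Editor's note: the MBart `__init__` record above defers heavy imports through `transformers`' `_LazyModule`, resolving submodules only when an attribute is first touched. Plain Python can express the same idea with a module-level `__getattr__` (PEP 562); the package and class names below are invented for illustration, and this sketch is not the `_LazyModule` implementation:

# lazy_pkg/__init__.py — a minimal lazy-import sketch (PEP 562).
import importlib

_import_structure = {
    "tokenization": ["MyTokenizer"],
    "modeling": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # The submodule is imported on first access and its attribute returned.
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)


def __dir__():
    return sorted(_attr_to_module)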
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"""vocab_file""": """sentencepiece.bpe.model"""} __A = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __A = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __A = """▁""" class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :str = VOCAB_FILES_NAMES __magic_name__ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ :Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token lowerCAmelCase__ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) lowerCAmelCase__ :Dict = vocab_file lowerCAmelCase__ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) lowerCAmelCase__ :Optional[int] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} lowerCAmelCase__ :Tuple = len(self.sp_model ) - 1 lowerCAmelCase__ :Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase__ :int = [self.cls_token_id] lowerCAmelCase__ :Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = [self.sep_token_id] lowerCAmelCase__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case ( self ): '''simple docstring''' 
return len(self.sp_model ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase__ :Any = self.sp_model.PieceToId(__UpperCAmelCase ) return spm_id if spm_id else self.unk_token_id def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = [] lowerCAmelCase__ :str = '' lowerCAmelCase__ :Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ :List[str] = True lowerCAmelCase__ :List[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def __getstate__( self ): '''simple docstring''' lowerCAmelCase__ :int = self.__dict__.copy() lowerCAmelCase__ :List[str] = None return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase__ :List[Any] = {} lowerCAmelCase__ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase__ :Tuple = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , 'wb' ) as fi: lowerCAmelCase__ :List[Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
93
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
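The pipeline classes above (text2text generation, summarization, translation) are normally reached through the pipeline factory rather than instantiated directly. A hedged usage sketch; the task defaults pull down whatever checkpoints Transformers associates with them:

# Hedged usage sketch for the generation pipelines defined above; the default
# checkpoints are whatever Transformers maps to these tasks, not fixed here.
from transformers import pipeline

summarizer = pipeline("summarization")          # SummarizationPipeline
translator = pipeline("translation_en_to_fr")   # TranslationPipeline

print(summarizer("Long article text ...", max_length=60, min_length=10))
print(translator("How are you?", max_length=40))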
682
0
'''simple docstring''' SCREAMING_SNAKE_CASE = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
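In the Transformers repo this pin table is consumed by setup.py, which splits each entry back into a package name and a version specifier. A rough sketch of that split; the regex here is an assumption, not the repo's exact pattern:

# Rough sketch of splitting pins like 'torch>=1.9,!=1.12.0' into (name, spec);
# the regex is an assumption, not the exact pattern setup.py uses.
import re

_SPLIT = re.compile(r"^([A-Za-z0-9_.\[\]-]+)(.*)$")

def split_pin(pin):
    name, spec = _SPLIT.match(pin).groups()
    return name, spec or None

assert split_pin("torch>=1.9,!=1.12.0") == ("torch", ">=1.9,!=1.12.0")
assert split_pin("ray[tune]") == ("ray[tune]", None)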
94
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
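The gradient-checkpointing test above rests on one invariant: recomputing activations in the backward pass must leave losses and gradients unchanged. A toy illustration with torch.utils.checkpoint (a single Linear layer, not the VAE; the use_reentrant flag assumes a reasonably recent PyTorch):

# Toy illustration of the invariant the gradient-checkpointing test checks:
# recomputation in the backward pass must not change gradients.
import torch
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
layer = torch.nn.Linear(8, 8)
x = torch.randn(4, 8, requires_grad=True)

layer(x).sum().backward()
grad_plain = layer.weight.grad.clone()

layer.zero_grad()
checkpoint(layer, x, use_reentrant=False).sum().backward()

assert torch.allclose(grad_plain, layer.weight.grad, atol=1e-6)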
682
0
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase_ (__A ): def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: UpperCAmelCase_ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase_ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(lowerCAmelCase_ , "num_attention_heads" ) ) class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=64 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Any=[128, 256, 384] , lowerCAmelCase_ : int=[4, 6, 8] , lowerCAmelCase_ : Optional[int]=[2, 3, 4] , lowerCAmelCase_ : List[Any]=[16, 16, 16] , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : int=[2, 2, 2] , lowerCAmelCase_ : List[Any]=[2, 2, 2] , lowerCAmelCase_ : str=0.0_2 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=2 , ) -> Any: UpperCAmelCase_ : str = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : List[Any] = kernel_size UpperCAmelCase_ : List[Any] = stride UpperCAmelCase_ : Optional[int] = padding UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = depths UpperCAmelCase_ : Union[str, Any] = key_dim UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : str = attention_ratio UpperCAmelCase_ : str = mlp_ratio UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : Union[str, Any] = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : Any = initializer_range def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , 
patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = LevitModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = (self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = image_size[0], image_size[1] for _ in range(4 ): UpperCAmelCase_ : List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) UpperCAmelCase_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : Optional[Any] = LevitForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Dict = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : str = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs UpperCAmelCase_ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : str = LevitModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: pass @unittest.skip(reason="Levit does not output attentions" ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass def 
_SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ): UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) UpperCAmelCase_ : str = outputs.hidden_states UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) + 1 self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = (self.model_tester.image_size, self.model_tester.image_size) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image_size[0], image_size[1] for _ in range(4 ): UpperCAmelCase_ : Dict = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) UpperCAmelCase_ : List[Any] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : str = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=False ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: if not self.model_tester.is_training: return UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCAmelCase_ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue UpperCAmelCase_ : int = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : Dict = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase_ : Tuple = False UpperCAmelCase_ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(lowerCAmelCase_ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ ) model.gradient_checkpointing_enable() model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCAmelCase_ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): UpperCAmelCase_ : int = problem_type["title"] UpperCAmelCase_ : int = problem_type["num_labels"] UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , 
return_labels=lowerCAmelCase_ ) if problem_type["num_labels"] > 1: UpperCAmelCase_ : Optional[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) UpperCAmelCase_ : str = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCAmelCase_ ) as warning_list: UpperCAmelCase_ : Dict = model(**lowerCAmelCase_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LevitModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> str: UpperCAmelCase_ : Optional[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : Dict = prepare_img() UpperCAmelCase_ : int = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
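The repeated floor(...) expressions in the Levit tests above are the standard convolution output-size formula, applied once per downsampling stage of the patch embedding. As a standalone helper with the tester's defaults:

# Standard conv output-size formula used by the tests above, applied once per
# downsampling stage (kernel_size=3, stride=2, padding=1 in the tester defaults).
from math import floor

def conv_out(size, kernel_size, stride, padding):
    return floor((size + 2 * padding - kernel_size) / stride + 1)

size = 64  # image_size from the tester defaults
for _ in range(4):
    size = conv_out(size, kernel_size=3, stride=2, padding=1)
print(size)  # 64 -> 32 -> 16 -> 8 -> 4 across the four stages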
95
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
682
0
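The batch-generation test above relies on left padding so that every prompt ends at the last position of the batch. Below is a minimal standalone sketch of that pattern using only standard transformers APIs; the gpt2 checkpoint, the prompts, and the max_new_tokens value are illustrative stand-ins, not taken from the dataset row.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# decoder-only models must be padded on the left so each prompt ends at the same position
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id

sentences = ["Hello, my dog is a little", "Today, I"]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)

outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=10,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))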
"""simple docstring""" import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __lowerCamelCase = '.' if __name__ == "__main__": __lowerCamelCase = os.path.join(REPO_PATH, 'utils/documentation_tests.txt') __lowerCamelCase = [] __lowerCamelCase = [] with open(doctest_file_path) as fp: for line in fp: __lowerCamelCase = line.strip() __lowerCamelCase = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __lowerCamelCase = '\n'.join(non_existent_paths) raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
96
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
682
0
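For context, a small hedged sketch of how a class-scoped fixture like sm_env above is typically consumed: pytest runs it once per test class and the fixture stores the environment on the class via request.cls. The TestExample class and its framework value are illustrative, not part of the repository.

import pytest


@pytest.mark.usefixtures("sm_env")
class TestExample:
    framework = "pytorch"  # read by the fixture through request.cls.framework

    def test_base_job_name(self):
        # the fixture attached a SageMakerTestEnvironment to the class as `env`
        assert self.env.base_job_name == "pytorch-transfromers-test"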
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""


def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate the intersection and union areas between a prediction and a ground truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersections and unions over a list of prediction / ground-truth pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from the accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
97
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
682
0
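To make the metric above concrete, here is a small worked example of the per-category IoU computation, mirroring intersect_and_union on a single pair of 2x2 maps with two classes; the arrays are made up for illustration.

import numpy as np

pred = np.array([[0, 1], [1, 1]])
truth = np.array([[0, 1], [0, 1]])

num_labels = 2
intersect = pred[pred == truth]  # pixels where prediction matches ground truth
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_truth = np.histogram(truth, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_truth - area_intersect

print(area_intersect / area_union)  # per-class IoU: [0.5, 0.66666667]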
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost of 1-, 7- and 30-day passes that covers every travel day."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
98
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
682
0
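A quick usage sketch for the ticket-cost function earlier in this record (the mincost_tickets name follows the cleaned-up definition above; the input values are illustrative): with travel days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15], the cheapest plan buys a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass on day 20.

print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 2 + 7 + 2 = 11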
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
99
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
682
0
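As a usage note for the lazy-import structure above: none of the submodules listed in _import_structure are imported until an attribute is first accessed. A tiny hedged demonstration (the module path assumes an installed transformers package with this layout):

import importlib

rag = importlib.import_module("transformers.models.rag")
# attribute access triggers _LazyModule to import the real submodule on demand
print(rag.RagConfig.__name__)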