code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters a = (7_2_0, 1_2_8_0) # Height, Width a = (0.4, 0.6) # if height or width lower than this scale, drop it. a = 1 / 1_0_0 a = '' a = '' a = '' a = 2_5_0 def UpperCAmelCase_ ( ): lowercase_ , lowercase_ = get_dataset(UpperCAmelCase__ , UpperCAmelCase__ ) for index in range(UpperCAmelCase__ ): lowercase_ = random.sample(range(len(UpperCAmelCase__ ) ) , 4 ) lowercase_ , lowercase_ , lowercase_ = update_image_and_anno( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , filter_scale=UpperCAmelCase__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowercase_ = random_chars(3_2 ) lowercase_ = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] lowercase_ = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(F'''{file_root}.jpg''' , UpperCAmelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) lowercase_ = [] for anno in new_annos: lowercase_ = anno[3] - anno[1] lowercase_ = anno[4] - anno[2] lowercase_ = anno[1] + width / 2 lowercase_ = anno[2] + height / 2 lowercase_ = F'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(UpperCAmelCase__ ) with open(F'''{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowercase_ = [] lowercase_ = [] for label_file in glob.glob(os.path.join(UpperCAmelCase__ , """*.txt""" ) ): lowercase_ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(UpperCAmelCase__ ) as in_file: lowercase_ = in_file.readlines() lowercase_ = os.path.join(UpperCAmelCase__ , F'''{label_name}.jpg''' ) lowercase_ = [] for obj_list in obj_lists: lowercase_ = obj_list.rstrip("""\n""" ).split(""" """ ) lowercase_ = float(obj[1] ) - float(obj[3] ) / 2 lowercase_ = float(obj[2] ) - 
float(obj[4] ) / 2 lowercase_ = float(obj[1] ) + float(obj[3] ) / 2 lowercase_ = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(UpperCAmelCase__ ) labels.append(UpperCAmelCase__ ) return img_paths, labels def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 0.0 , ): lowercase_ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase_ = int(scale_x * output_size[1] ) lowercase_ = int(scale_y * output_size[0] ) lowercase_ = [] lowercase_ = [] for i, index in enumerate(UpperCAmelCase__ ): lowercase_ = all_img_list[index] path_list.append(UpperCAmelCase__ ) lowercase_ = all_annos[index] lowercase_ = cva.imread(UpperCAmelCase__ ) if i == 0: # top-left lowercase_ = cva.resize(UpperCAmelCase__ , (divid_point_x, divid_point_y) ) lowercase_ = img for bbox in img_annos: lowercase_ = bbox[1] * scale_x lowercase_ = bbox[2] * scale_y lowercase_ = bbox[3] * scale_x lowercase_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right lowercase_ = cva.resize(UpperCAmelCase__ , (output_size[1] - divid_point_x, divid_point_y) ) lowercase_ = img for bbox in img_annos: lowercase_ = scale_x + bbox[1] * (1 - scale_x) lowercase_ = bbox[2] * scale_y lowercase_ = scale_x + bbox[3] * (1 - scale_x) lowercase_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left lowercase_ = cva.resize(UpperCAmelCase__ , (divid_point_x, output_size[0] - divid_point_y) ) lowercase_ = img for bbox in img_annos: lowercase_ = bbox[1] * scale_x lowercase_ = scale_y + bbox[2] * (1 - scale_y) lowercase_ = bbox[3] * scale_x lowercase_ = scale_y + bbox[4] * (1 - scale_y) 
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right lowercase_ = cva.resize( UpperCAmelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) lowercase_ = img for bbox in img_annos: lowercase_ = scale_x + bbox[1] * (1 - scale_x) lowercase_ = scale_y + bbox[2] * (1 - scale_y) lowercase_ = scale_x + bbox[3] * (1 - scale_x) lowercase_ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: lowercase_ = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def UpperCAmelCase_ ( UpperCAmelCase__ ): assert number_char > 1, "The number of character should greater than 1" lowercase_ = ascii_lowercase + digits return "".join(random.choice(UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ ) ) if __name__ == "__main__": main() print('DONE ✅')
412
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from 
.tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
460
0
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def A__ ( _UpperCAmelCase : Any ) -> List[str]: '''simple docstring''' if is_torch_version("<" , "2.0.0" ) or not hasattr(UpperCAmelCase__ , "_dynamo" ): return False return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule ) def A__ ( _UpperCAmelCase : str , _UpperCAmelCase : bool = True ) -> str: '''simple docstring''' snake_case__ : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) snake_case__ : List[Any] = is_compiled_module(UpperCAmelCase__ ) if is_compiled: snake_case__ : Any = model snake_case__ : Optional[int] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): snake_case__ : Union[str, Any] = model.module if not keep_fpaa_wrapper: snake_case__ : int = getattr(UpperCAmelCase__ , "forward" ) snake_case__ : int = model.__dict__.pop("_original_forward" , UpperCAmelCase__ ) if original_forward is not None: while hasattr(UpperCAmelCase__ , "__wrapped__" ): snake_case__ : int = forward.__wrapped__ if forward == original_forward: break snake_case__ : Any = forward if getattr(UpperCAmelCase__ , "_converted_to_transformer_engine" , UpperCAmelCase__ ): convert_model(UpperCAmelCase__ , to_transformer_engine=UpperCAmelCase__ ) if is_compiled: snake_case__ : Tuple = model snake_case__ : int = compiled_model return model def A__ ( ) -> Any: '''simple docstring''' PartialState().wait_for_everyone() def A__ ( _UpperCAmelCase 
: Union[str, Any] , _UpperCAmelCase : Tuple ) -> Dict: '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(UpperCAmelCase__ , UpperCAmelCase__ ) elif PartialState().local_process_index == 0: torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) @contextmanager def A__ ( **_UpperCAmelCase : Any ) -> Dict: '''simple docstring''' for key, value in kwargs.items(): snake_case__ : List[Any] = str(UpperCAmelCase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def A__ ( _UpperCAmelCase : Union[str, Any] ) -> str: '''simple docstring''' if not hasattr(UpperCAmelCase__ , "__qualname__" ) and not hasattr(UpperCAmelCase__ , "__name__" ): snake_case__ : Tuple = getattr(UpperCAmelCase__ , "__class__" , UpperCAmelCase__ ) if hasattr(UpperCAmelCase__ , "__qualname__" ): return obj.__qualname__ if hasattr(UpperCAmelCase__ , "__name__" ): return obj.__name__ return str(UpperCAmelCase__ ) def A__ ( _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> Dict: '''simple docstring''' for key, value in source.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): snake_case__ : Union[str, Any] = destination.setdefault(UpperCAmelCase__ , {} ) merge_dicts(UpperCAmelCase__ , UpperCAmelCase__ ) else: snake_case__ : List[Any] = value return destination def A__ ( _UpperCAmelCase : int = None ) -> bool: '''simple docstring''' if port is None: snake_case__ : Any = 2_95_00 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
715
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowercase = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } lowercase = {"""facebook/blenderbot_small-90M""": 512} def A__ ( _UpperCAmelCase : Dict ) -> Tuple: '''simple docstring''' snake_case__ : List[str] = set() snake_case__ : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : Any = char snake_case__ : List[str] = set(_UpperCAmelCase ) return pairs class SCREAMING_SNAKE_CASE_ ( _lowercase): '''simple docstring''' __magic_name__ : Dict = VOCAB_FILES_NAMES __magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="__start__" , lowerCamelCase__="__end__" , lowerCamelCase__="__unk__" , lowerCamelCase__="__null__" , **lowerCamelCase__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , **lowerCamelCase__) with open(lowerCamelCase__ , encoding="utf-8") as vocab_handle: snake_case__ : int = json.load(lowerCamelCase__) snake_case__ : 
List[str] = {v: k for k, v in self.encoder.items()} with open(lowerCamelCase__ , encoding="utf-8") as merges_handle: snake_case__ : Any = merges_handle.read().split("\n")[1:-1] snake_case__ : Optional[int] = [tuple(merge.split()) for merge in merges] snake_case__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__)))) snake_case__ : List[str] = {} @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return len(self.encoder) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase ( self , lowerCamelCase__) -> str: '''simple docstring''' if token in self.cache: return self.cache[token] snake_case__ : Tuple = re.sub("([.,!?()])" , R" \1" , lowerCamelCase__) snake_case__ : List[Any] = re.sub("(')" , R" \1 " , lowerCamelCase__) snake_case__ : Dict = re.sub(R"\s{2,}" , " " , lowerCamelCase__) if "\n" in token: snake_case__ : Tuple = token.replace("\n" , " __newln__") snake_case__ : Optional[int] = token.split(" ") snake_case__ : int = [] for token in tokens: if not len(lowerCamelCase__): continue snake_case__ : str = token.lower() snake_case__ : List[str] = tuple(lowerCamelCase__) snake_case__ : str = tuple(list(word[:-1]) + [word[-1] + "</w>"]) snake_case__ : Optional[int] = get_pairs(lowerCamelCase__) if not pairs: words.append(lowerCamelCase__) continue while True: snake_case__ : int = min(lowerCamelCase__ , key=lambda lowerCamelCase__: self.bpe_ranks.get(lowerCamelCase__ , float("inf"))) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Any = bigram snake_case__ : Optional[int] = [] snake_case__ : str = 0 while i < len(lowerCamelCase__): try: snake_case__ : Any = word.index(lowerCamelCase__ , lowerCamelCase__) new_word.extend(word[i:j]) snake_case__ : Tuple = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowerCamelCase__) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: 
new_word.append(word[i]) i += 1 snake_case__ : Optional[int] = tuple(lowerCamelCase__) snake_case__ : str = new_word if len(lowerCamelCase__) == 1: break else: snake_case__ : Optional[int] = get_pairs(lowerCamelCase__) snake_case__ : Tuple = "@@ ".join(lowerCamelCase__) snake_case__ : Union[str, Any] = word[:-4] snake_case__ : Any = word words.append(lowerCamelCase__) return " ".join(lowerCamelCase__) def UpperCAmelCase ( self , lowerCamelCase__) -> List[str]: '''simple docstring''' snake_case__ : Any = [] snake_case__ : Union[str, Any] = re.findall(R"\S+\n?" , lowerCamelCase__) for token in words: split_tokens.extend(list(self.bpe(lowerCamelCase__).split(" "))) return split_tokens def UpperCAmelCase ( self , lowerCamelCase__) -> int: '''simple docstring''' snake_case__ : str = token.lower() return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token)) def UpperCAmelCase ( self , lowerCamelCase__) -> str: '''simple docstring''' return self.decoder.get(lowerCamelCase__ , self.unk_token) def UpperCAmelCase ( self , lowerCamelCase__) -> str: '''simple docstring''' snake_case__ : Optional[int] = " ".join(lowerCamelCase__).replace("@@ " , "").strip() return out_string def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCamelCase__): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return snake_case__ : int = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) snake_case__ : Tuple = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(lowerCamelCase__ , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__) + "\n") snake_case__ : str = 0 with open(lowerCamelCase__ , "w" , encoding="utf-8") as writer: writer.write("#version: 
0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__: kv[1]): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!") snake_case__ : Tuple = token_index writer.write(" ".join(lowerCamelCase__) + "\n") index += 1 return vocab_file, merge_file
150
0
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging __lowerCamelCase = logging.get_logger(__name__) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=False ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise if not is_sharded: A_ = os.path.abspath(UpperCAmelCase__ ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) A_ = convert_pytorch_state_dict_to_flax(UpperCAmelCase__, UpperCAmelCase__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A_ = convert_pytorch_sharded_state_dict_to_flax(UpperCAmelCase__, UpperCAmelCase__ ) return flax_state_dict def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(UpperCAmelCase__ ) -> bool: return len(set(UpperCAmelCase__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A_ = pt_tuple_key[:-1] + ("""scale""",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A_ = pt_tuple_key[:-1] + ("""mean""",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var 
A_ = pt_tuple_key[:-1] + ("""var""",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): return renamed_pt_tuple_key, pt_tensor # embedding A_ = pt_tuple_key[:-1] + ("""embedding""",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A_ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): A_ = pt_tensor.transpose(2, 3, 1, 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A_ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(UpperCAmelCase__ ): A_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A_ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A_ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A_ = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A_ = pt_tuple_key[-2] + """_g""" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A_ = pt_tuple_key[-2] + """_v""" if name is not None: A_ = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]: # convert pytorch tensor to numpy A_ = {k: v.numpy() for k, v in pt_state_dict.items()} A_ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A_ = flax_model.params["""params"""] else: A_ = flax_model.params A_ = flatten_dict(UpperCAmelCase__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A_ = 
flatten_dict(flax_model.params["""batch_stats"""] ) random_flax_state_dict.update(UpperCAmelCase__ ) A_ = {} A_ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) A_ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A_ = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary A_ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A_ = pt_tuple_key[1:] # Correctly rename weight parameters A_ , A_ = rename_key_and_reshape_tensor( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # add model prefix if necessary A_ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A_ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A_ = jnp.asarray(UpperCAmelCase__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ ) continue # also add unexpected weight so that warning is thrown A_ = jnp.asarray(UpperCAmelCase__ ) else: # also add unexpected weight so that warning is thrown A_ = jnp.asarray(UpperCAmelCase__ ) return unflatten_dict(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict: import torch # Load the index A_ = {} for shard_file in shard_filenames: # load using msgpack utils A_ = torch.load(UpperCAmelCase__ ) A_ = {k: v.numpy() for k, v in pt_state_dict.items()} A_ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A_ = flax_model.params["""params"""] A_ = flatten_dict(UpperCAmelCase__ ) random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) ) else: A_ = flax_model.params A_ = flatten_dict(UpperCAmelCase__ ) A_ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) A_ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A_ = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary A_ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A_ = pt_tuple_key[1:] # Correctly rename weight parameters A_ , A_ = rename_key_and_reshape_tensor( UpperCAmelCase__, 
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # add model prefix if necessary A_ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A_ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A_ = jnp.asarray(UpperCAmelCase__ ) continue if "var" in flax_key[-1]: A_ = jnp.asarray(UpperCAmelCase__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ ) continue # also add unexpected weight so that warning is thrown A_ = jnp.asarray(UpperCAmelCase__ ) else: # also add unexpected weight so that warning is thrown A_ = jnp.asarray(UpperCAmelCase__ ) return unflatten_dict(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]: A_ = os.path.abspath(UpperCAmelCase__ ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class A_ = getattr(UpperCAmelCase__, """Flax""" + model.__class__.__name__ ) # load flax weight dict with open(UpperCAmelCase__, """rb""" ) as state_f: try: A_ = from_bytes(UpperCAmelCase__, state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]: try: import torch # noqa: F401 except ImportError: logger.error( """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. 
Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights A_ = flatten_dict(jax.tree_util.tree_map(lambda UpperCAmelCase__ : x.dtype == jnp.bfloataa, UpperCAmelCase__ ) ).values() if any(UpperCAmelCase__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) A_ = jax.tree_util.tree_map( lambda UpperCAmelCase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, UpperCAmelCase__ ) A_ = flatten_dict(UpperCAmelCase__ ) A_ = pt_model.state_dict() A_ = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) A_ = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A_ = [] A_ = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A_ = flax_key_tuple[0] == pt_model.base_model_prefix A_ = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A_ = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A_ = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(UpperCAmelCase__ ) not in pt_model_dict: # conv layer A_ = flax_key_tuple[:-1] + ("""weight""",) A_ = jnp.transpose(UpperCAmelCase__, (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and 
".".join(UpperCAmelCase__ ) not in pt_model_dict: # linear layer A_ = flax_key_tuple[:-1] + ("""weight""",) A_ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A_ = flax_key_tuple[:-1] + ("""weight""",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A_ = flax_key_tuple[:-1] + ("""running_mean""",) elif "var" in flax_key_tuple[-1]: A_ = flax_key_tuple[:-1] + ("""running_var""",) if "batch_stats" in flax_state: A_ = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A_ = """.""".join(UpperCAmelCase__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A_ = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A_ = key.split(""".""" ) A_ = None if key_components[-3::2] == ["parametrizations", "original0"]: A_ = key_components[-2] + """_g""" elif key_components[-3::2] == ["parametrizations", "original1"]: A_ = key_components[-2] + """_v""" if name is not None: A_ = key_components[:-3] + [name] A_ = """.""".join(UpperCAmelCase__ ) A_ = key if flax_key in special_pt_names: A_ = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict A_ = np.asarray(UpperCAmelCase__ ) if not isinstance(UpperCAmelCase__, np.ndarray ) else flax_tensor A_ = torch.from_numpy(UpperCAmelCase__ ) # remove from missing keys missing_keys.remove(UpperCAmelCase__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(UpperCAmelCase__ ) pt_model.load_state_dict(UpperCAmelCase__ ) # re-transform missing_keys to list A_ = list(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' """ to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(UpperCAmelCase__ ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' """ use it for predictions and inference.""" ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' """If your task is similar to the task the model of the checkpoint was trained on, """ F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
288
'''simple docstring'''
# Unit tests for the repo utility `check_dummies`, which generates the "dummy
# object" placeholder files used when an optional backend (torch, tf, flax,
# sentencepiece, ...) is not installed.
#
# NOTE(review): this module's identifiers were machine-mangled.  Every
# module-level assignment targets `__lowerCamelCase` (each rebinding the same
# name), while later code reads the *original* names (`git_repo_path`,
# `objects`, `dummy_files`, ...).  As written the module raises NameError at
# import time; the intended names must be restored from the upstream file.
import os
import sys
import unittest


# Intended: absolute path of the git repo root (three directories up).
__lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# NOTE(review): `git_repo_path` is never defined (mangled to `__lowerCamelCase` above).
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCamelCase = os.path.join(git_repo_path, '''src''', '''transformers''')

# Expected rendering of a dummy module-level constant.
__lowerCamelCase = '''
{0} = None
'''

# Expected rendering of a dummy class guarded by `requires_backends`.
__lowerCamelCase = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''

# Expected rendering of a dummy function guarded by `requires_backends`.
__lowerCamelCase = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''


class A__ ( unittest.TestCase ):
    # NOTE(review): all four test methods were mangled to the same name
    # `snake_case_`; in Python each later `def` shadows the earlier one, so
    # only the last method would actually exist on the class.

    def snake_case_ ( self ) -> Dict:
        '''simple docstring'''
        # `find_backend` returns None for ordinary lines and the backend
        # name(s) for `if not is_xxx_available():` guard lines; compound
        # guards join backend names with `_and_`.
        # NOTE(review): results are bound to `A_` but asserted through
        # `UpperCamelCase__` — same mangling artifact as above.
        A_ = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
        self.assertIsNone(UpperCamelCase__ )
        A_ = find_backend(""" if not is_tokenizers_available():""" )
        self.assertEqual(UpperCamelCase__ , """tokenizers""" )
        A_ = find_backend(""" if not is_tensorflow_text_available():""" )
        self.assertEqual(UpperCamelCase__ , """tensorflow_text""" )
        A_ = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""" )
        A_ = find_backend(
            """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""" )
        A_ = find_backend(
            """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
        self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""" )

    def snake_case_ ( self ) -> int:
        '''simple docstring'''
        # `read_init` maps each backend name to the objects needing dummies.
        A_ = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , UpperCamelCase__ )
        self.assertIn("""tensorflow_text""" , UpperCamelCase__ )
        self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__ )
        # Likewise, we can't assert on the exact content of a key
        # NOTE(review): `objects` is never assigned here (mangled to `A_`).
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertModel""" , objects["""tf"""] )
        self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
        self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )

    def snake_case_ ( self ) -> str:
        '''simple docstring'''
        # `create_dummy_object` picks the right template for a constant, a
        # function, and a class respectively.
        A_ = create_dummy_object("""CONSTANT""" , """'torch'""" )
        self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""" )
        A_ = create_dummy_object("""function""" , """'torch'""" )
        self.assertEqual(
            UpperCamelCase__ , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        A_ = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        A_ = create_dummy_object("""FakeClass""" , """'torch'""" )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )

    def snake_case_ ( self ) -> str:
        '''simple docstring'''
        # `create_dummy_files` renders one autogenerated dummy file per
        # backend from a {backend: [object names]} mapping.
        A_ = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, [\"torch\"])


class FakeClass(metaclass=DummyObject):
    _backends = [\"torch\"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, [\"torch\"])
"""
        A_ = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        # NOTE(review): `dummy_files` is never assigned (mangled to `A_`).
        self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__ )
288
1
# Parallel odd-even transposition sort: one process per list element, with
# neighbouring processes exchanging values through multiprocessing Pipes.
#
# NOTE(review): identifiers were machine-mangled.  All three functions share
# the name `UpperCamelCase__` (later defs shadow earlier ones); every worker
# parameter is named `UpperCAmelCase` (duplicate parameter names are a
# SyntaxError in Python); and calls reference the original names
# (`odd_even_transposition`, `main`, `process_lock`, `position`, ...) that
# are never defined here.  The file cannot run until the real names are
# restored.
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
# NOTE(review): later code reads `process_lock`, but the lock is bound to
# `__lowerCamelCase` — mangling artifact.
__lowerCamelCase = Lock()


def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
    """simple docstring"""
    # Worker process: holds one value; on even phases it compares with the
    # right neighbour, on odd phases with the left, then reports its final
    # value through `result_pipe`.
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): the phase count is hard-coded to 10 — presumably this
    # should be the list length; it is only correct for 10-element inputs.
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(UpperCAmelCase )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            _a : Union[str, Any] = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            _a : Optional[Any] = min(UpperCAmelCase , UpperCAmelCase )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(UpperCAmelCase )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            _a : Any = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            _a : List[Any] = max(UpperCAmelCase , UpperCAmelCase )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(UpperCAmelCase )


def UpperCamelCase__ ( UpperCAmelCase ) -> Tuple:
    """simple docstring"""
    # Driver: spawns one worker per element, wiring neighbouring workers
    # together with Pipes, then collects the sorted values in place.
    _a : Optional[Any] = []
    _a : List[str] = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    _a : str = Pipe()
    _a : Union[str, Any] = Pipe()
    process_array_.append(
        Process(
            target=UpperCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    _a : List[Any] = temp_rs
    _a : Tuple = temp_rr
    for i in range(1 , len(UpperCAmelCase ) - 1 ):
        _a : Union[str, Any] = Pipe()
        _a : str = Pipe()
        process_array_.append(
            Process(
                target=UpperCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        _a : Tuple = temp_rs
        _a : Dict = temp_rr
    process_array_.append(
        Process(
            target=UpperCAmelCase , args=(
                len(UpperCAmelCase ) - 1,
                arr[len(UpperCAmelCase ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(UpperCAmelCase ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(UpperCAmelCase ) ):
        _a : str = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def UpperCamelCase__ ( ) -> List[str]:
    """simple docstring"""
    # Demo entry point: sorts the reversed list [10..1] and prints both forms.
    _a : List[Any] = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*UpperCAmelCase )
    _a : List[str] = odd_even_transposition(UpperCAmelCase )
    print('''Sorted List\n''' )
    print(*UpperCAmelCase )


if __name__ == "__main__":
    main()
307
# Tokenizer for the ESM-2 protein language models.  The vocabulary is a plain
# newline-separated token file, and tokenization is simple whitespace
# splitting (protein sequences are pre-spaced per residue).
#
# NOTE(review): identifiers were machine-mangled.  All method-local results
# are bound to `_a` while later code reads the original names
# (`self.all_tokens`, `self._id_to_token`, `lines`, ...); `__init__` and
# several methods declare every parameter as `lowercase` (duplicate
# parameter names are a SyntaxError); and all methods share the name
# `snake_case__`, so later defs shadow earlier ones.  The real names must be
# restored from the upstream file before this can run.
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


__lowerCamelCase = logging.get_logger(__name__)

__lowerCamelCase = {'vocab_file': 'vocab.txt'}

__lowerCamelCase = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

__lowerCamelCase = {
    'facebook/esm2_t6_8M_UR50D': 1_024,
    'facebook/esm2_t12_35M_UR50D': 1_024,
}


def UpperCamelCase__ ( UpperCAmelCase ) -> int:
    """simple docstring"""
    # Read the vocab file and return one stripped token per line.
    # NOTE(review): opens without `encoding=` (platform-dependent default)
    # and the `-> int` annotation is wrong for a list-returning function.
    with open(UpperCAmelCase , '''r''' ) as f:
        _a : List[str] = f.read().splitlines()
    return [l.strip() for l in lines]


class UpperCamelCase_ ( UpperCamelCase ):
    # Standard PreTrainedTokenizer class-attribute hooks.
    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = ['''input_ids''', '''attention_mask''']

    def __init__( self , lowercase , lowercase="<unk>" , lowercase="<cls>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="<eos>" , **lowercase , ) -> Dict:
        # Loads the vocab file and builds both id<->token lookup tables.
        super().__init__(**lowercase )
        _a : Optional[Any] = load_vocab_file(lowercase )
        _a : str = dict(enumerate(self.all_tokens ) )
        _a : Any = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        _a : List[Any] = unk_token
        _a : Dict = cls_token
        _a : Tuple = pad_token
        _a : List[Any] = mask_token
        _a : List[str] = eos_token
        # All vocab tokens are treated as unsplittable units.
        _a : Union[str, Any] = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def snake_case__( self , lowercase ) -> str:
        # id -> token, falling back to the unk token.
        return self._id_to_token.get(lowercase , self.unk_token )

    def snake_case__( self , lowercase ) -> int:
        # token -> id, falling back to the unk token's id.
        return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )

    def snake_case__( self , lowercase , **lowercase ) -> Optional[Any]:
        # Tokenization is plain whitespace splitting.
        return text.split()

    def snake_case__( self , lowercase=False ) -> Dict:
        # Vocab size.  NOTE(review): the with-added-tokens flag is ignored.
        return len(self._id_to_token )

    def snake_case__( self ) -> int:
        # token -> index mapping (get_vocab).
        return {token: i for i, token in enumerate(self.all_tokens )}

    def snake_case__( self , lowercase ) -> int:
        # Duplicate of the token->id converter above.
        return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )

    def snake_case__( self , lowercase ) -> str:
        # Duplicate of the id->token converter above.
        return self._id_to_token.get(lowercase , self.unk_token )

    def snake_case__( self , lowercase , lowercase = None ) -> List[int]:
        # Build model inputs: <cls> seq [<eos>] or <cls> a <eos> b <eos>.
        _a : List[str] = [self.cls_token_id]
        _a : Dict = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_a + sep + token_ids_a + sep  # Multiple inputs always have an EOS token

    def snake_case__( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
        # Special-tokens mask: 1 for special positions, 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        _a : Any = [1] + ([0] * len(lowercase )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(lowercase ) + [1]
        return mask

    def snake_case__( self , lowercase , lowercase ) -> Tuple:
        # Write the vocabulary back out as a newline-separated file.
        # NOTE(review): also opens without `encoding=` — confirm UTF-8 intent.
        _a : List[Any] = os.path.join(lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(lowercase , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def snake_case__( self ) -> int:
        # vocab_size property delegating to get_vocab_size.
        return self.get_vocab_size(with_added_tokens=lowercase )

    def snake_case__( self , lowercase , lowercase = False ) -> int:
        # Delegate to the base-class token-adding machinery.
        return super()._add_tokens(lowercase , special_tokens=lowercase )
307
1
'''simple docstring'''
# Tests for `transformers.GenerationConfig`: local save/load round-trips,
# construction from a model config, `.update()` semantics, and (staging-only)
# push-to-hub behaviour.
#
# NOTE(review): identifiers were machine-mangled — every local result is
# bound to `lowerCamelCase_`, while assertions read the original names
# (`config`, `loaded_config`, `generation_config`, ...), and arguments were
# collapsed onto `SCREAMING_SNAKE_CASE_`.  Both test classes also share the
# name `UpperCAmelCase` (the second definition shadows the first), and the
# methods within each class all share the name `UpperCamelCase`.  The file
# cannot run until the real names are restored.
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @parameterized.expand([(None,), ('foo.json',)] )
    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        '''simple docstring'''
        # Round-trip save_pretrained/from_pretrained, with and without a
        # custom config file name.
        lowerCamelCase_ = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
            lowerCamelCase_ = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> Tuple:
        '''simple docstring'''
        # from_model_config should pull generation-relevant fields (e.g.
        # eos_token_id) out of a model config.
        lowerCamelCase_ = AutoConfig.from_pretrained('gpt2' )
        lowerCamelCase_ = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )

    def UpperCamelCase( self ) -> List[Any]:
        '''simple docstring'''
        # `.update()` applies valid attributes, leaves its argument dict
        # untouched, and returns the unused kwargs.
        lowerCamelCase_ = GenerationConfig()
        lowerCamelCase_ = {
            'max_new_tokens': 1024,
            'foo': 'bar',
        }
        lowerCamelCase_ = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = generation_config.update(**SCREAMING_SNAKE_CASE_ )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(SCREAMING_SNAKE_CASE_ , {'foo': 'bar'} )

    def UpperCamelCase( self ) -> Optional[int]:
        '''simple docstring'''
        # Ad-hoc attributes survive a save/load round-trip but are not
        # invented by from_model_config.
        lowerCamelCase_ = GenerationConfig()
        lowerCamelCase_ = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowerCamelCase_ = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo , 'bar' )
        lowerCamelCase_ = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
        assert not hasattr(SCREAMING_SNAKE_CASE_ , 'foo' )  # no new kwargs should be initialized if from config

    def UpperCamelCase( self ) -> Optional[int]:
        '''simple docstring'''
        # Defaults vs. explicit kwargs, and kwargs passed to from_pretrained
        # overriding stored values.
        lowerCamelCase_ = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(default_config.num_beams , 1 )
        lowerCamelCase_ = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowerCamelCase_ = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value


@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''
    # Staging-only tests: push a config to the hub (user and org namespaces),
    # re-download it, and compare field by field.  Requires network and a
    # valid staging token; cleanup happens in the class teardown.

    @classmethod
    def UpperCamelCase( cls ) -> Optional[int]:
        '''simple docstring'''
        # Authenticate against the staging hub.
        lowerCamelCase_ = TOKEN
        HfFolder.save_token(SCREAMING_SNAKE_CASE_ )

    @classmethod
    def UpperCamelCase( cls ) -> Tuple:
        '''simple docstring'''
        # Best-effort removal of the repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id='test-generation-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
        except HTTPError:
            pass

    def UpperCamelCase( self ) -> Dict:
        '''simple docstring'''
        # Push under the user namespace, both via push_to_hub and via
        # save_pretrained(push_to_hub=...).
        lowerCamelCase_ = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('test-generation-config' , use_auth_token=self._token )
        lowerCamelCase_ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-generation-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                SCREAMING_SNAKE_CASE_ , repo_id='test-generation-config' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
        lowerCamelCase_ = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

    def UpperCamelCase( self ) -> Tuple:
        '''simple docstring'''
        # Same as above, but under an organization namespace.
        lowerCamelCase_ = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
        lowerCamelCase_ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                SCREAMING_SNAKE_CASE_ , repo_id='valid_org/test-generation-config-org' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
        lowerCamelCase_ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
42
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class _lowercase ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Dict = """philschmid/bart-large-cnn-samsum""" _SCREAMING_SNAKE_CASE : Optional[Any] = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) _SCREAMING_SNAKE_CASE : Tuple = """summarizer""" _SCREAMING_SNAKE_CASE : Any = AutoTokenizer _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSeqaSeqLM _SCREAMING_SNAKE_CASE : int = ["""text"""] _SCREAMING_SNAKE_CASE : List[Any] = ["""text"""] def a ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.pre_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , truncation=SCREAMING_SNAKE_CASE__ ) def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: return self.model.generate(**SCREAMING_SNAKE_CASE__ )[0] def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple: return self.pre_processor.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
427
0
# Conversion script: turns an original Audio Spectrogram Transformer (AST)
# checkpoint into a Hugging Face `ASTForAudioClassification` checkpoint,
# verifies the logits on a sample input, then optionally saves/pushes it.
#
# NOTE(review): identifiers were machine-mangled.  All five helper functions
# share the name `__lowerCamelCase` (only the last binding survives), every
# local result is bound to `lowercase__`, and calls reference the original
# names (`get_audio_spectrogram_transformer_config`, `remove_keys`,
# `convert_state_dict`, `model_name`, ...) that are never defined.  The real
# names must be restored from the upstream file before this can run.
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)


def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    # Build an ASTConfig matching the checkpoint named by `model_name`:
    # patch-size/depth variants plus the right label set (AudioSet vs
    # Speech Commands).
    lowercase__ : Dict = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        lowercase__ : Tuple = 128
    elif "12-12" in model_name:
        lowercase__ : int = 12
        lowercase__ : List[Any] = 12
    elif "14-14" in model_name:
        lowercase__ : Union[str, Any] = 14
        lowercase__ : Any = 14
    elif "16-16" in model_name:
        lowercase__ : Union[str, Any] = 16
        lowercase__ : int = 16
    else:
        raise ValueError("Model not supported" )
    lowercase__ : Tuple = "huggingface/label-files"
    if "speech-commands" in model_name:
        lowercase__ : Union[str, Any] = 35
        lowercase__ : List[Any] = "speech-commands-v2-id2label.json"
    else:
        lowercase__ : Optional[Any] = 527
        lowercase__ : Union[str, Any] = "audioset-id2label.json"
    # Fetch the id->label mapping from the hub and install it on the config.
    lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : int = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : int = idalabel
    lowercase__ : int = {v: k for k, v in idalabel.items()}
    return config


def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    # Map one original state-dict key name to the HF naming scheme.
    if "module.v" in name:
        lowercase__ : str = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        lowercase__ : Any = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        lowercase__ : List[str] = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        lowercase__ : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        lowercase__ : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        lowercase__ : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        lowercase__ : Dict = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        lowercase__ : Optional[int] = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        lowercase__ : Tuple = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        lowercase__ : Tuple = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        lowercase__ : int = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        lowercase__ : List[str] = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        lowercase__ : Optional[Any] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        lowercase__ : Union[str, Any] = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    # Rewrite the state dict: split each fused qkv matrix into separate
    # query/key/value tensors; rename all other keys.
    for key in orig_state_dict.copy().keys():
        lowercase__ : Union[str, Any] = orig_state_dict.pop(lowerCamelCase__ )
        if "qkv" in key:
            lowercase__ : Optional[Any] = key.split("." )
            lowercase__ : List[Any] = int(key_split[3] )
            lowercase__ : Union[str, Any] = config.hidden_size
            if "weight" in key:
                # qkv weight rows are stacked [q; k; v] along dim 0.
                lowercase__ : Any = val[:dim, :]
                lowercase__ : str = val[dim : dim * 2, :]
                lowercase__ : List[str] = val[-dim:, :]
            else:
                lowercase__ : Tuple = val[:dim]
                lowercase__ : str = val[dim : dim * 2]
                lowercase__ : Dict = val[-dim:]
        else:
            lowercase__ : str = val
    return orig_state_dict


def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    # Drop head keys present in the original checkpoint but unused here.
    lowercase__ : Tuple = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )


@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
    """simple docstring"""
    # End-to-end conversion: download checkpoint, remap weights, load into
    # the HF model, sanity-check logits against hard-coded expected slices,
    # then optionally save to disk and/or push to the hub.
    lowercase__ : Tuple = get_audio_spectrogram_transformer_config(lowerCamelCase__ )
    lowercase__ : List[str] = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    lowercase__ : Optional[int] = model_name_to_url[model_name]
    lowercase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )
    # remove some keys
    remove_keys(lowerCamelCase__ )
    # rename some keys
    lowercase__ : List[str] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
    # load 🤗 model
    lowercase__ : int = ASTForAudioClassification(lowerCamelCase__ )
    model.eval()
    model.load_state_dict(lowerCamelCase__ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    lowercase__ : Union[str, Any] = -4.2677393 if "speech-commands" not in model_name else -6.845978
    lowercase__ : int = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    lowercase__ : str = 1_024 if "speech-commands" not in model_name else 128
    lowercase__ : Union[str, Any] = ASTFeatureExtractor(mean=lowerCamelCase__ , std=lowerCamelCase__ , max_length=lowerCamelCase__ )
    if "speech-commands" in model_name:
        lowercase__ : Dict = load_dataset("speech_commands" , "v0.02" , split="validation" )
        lowercase__ : Any = dataset[0]["audio"]["array"]
    else:
        lowercase__ : int = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        lowercase__ : Dict = torchaudio.load(lowerCamelCase__ )
        lowercase__ : List[Any] = waveform.squeeze().numpy()
    lowercase__ : List[Any] = feature_extractor(lowerCamelCase__ , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    lowercase__ : Dict = model(**lowerCamelCase__ )
    lowercase__ : Any = outputs.logits
    # Expected first three logits per released checkpoint, used as a
    # conversion sanity check.
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        lowercase__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        lowercase__ : List[str] = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        lowercase__ : int = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        lowercase__ : Any = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        lowercase__ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        lowercase__ : List[Any] = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        lowercase__ : Tuple = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        lowercase__ : int = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCamelCase__ )
        print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
        feature_extractor.save_pretrained(lowerCamelCase__ )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F"""MIT/{model_name}""" )
        feature_extractor.push_to_hub(F"""MIT/{model_name}""" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''ast-finetuned-audioset-10-10-0.4593''',
        type=str,
        help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    # NOTE(review): `parser` is never defined (the ArgumentParser was bound
    # to `lowerCAmelCase__` above) — same mangling artifact.
    lowerCAmelCase__ = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
705
# NOTE(review): this file is a machine-obfuscated copy of the "convert YOLOS
# checkpoint" script.  Local names were rewritten to `lowercase__` /
# `lowerCAmelCase__`, so many identifiers referenced below (`yolos_name`,
# `config`, `idalabel`, `state_dict`, `in_proj_weight`, `name`, `val`, ...)
# are never bound and the code raises NameError if executed.  Per review
# policy only comments/docstrings are changed here; the mangled code is
# preserved byte-for-byte.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)


def __lowerCamelCase ( lowerCamelCase__ ):
    """Build a YolosConfig (architecture sizes + COCO id2label) for a checkpoint name."""
    lowercase__ : List[str] = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowercase__ : Tuple = 192
        lowercase__ : List[Any] = 768
        lowercase__ : Tuple = 12
        lowercase__ : List[str] = 3
        lowercase__ : List[Any] = [800, 1_333]
        lowercase__ : Union[str, Any] = False
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : str = 330
        lowercase__ : List[Any] = 14
        lowercase__ : Tuple = 6
        lowercase__ : Optional[int] = 1_320
    elif "yolos_s" in yolos_name:
        lowercase__ : Dict = 384
        lowercase__ : str = 1_536
        lowercase__ : List[Any] = 12
        lowercase__ : List[Any] = 6
    elif "yolos_b" in yolos_name:
        lowercase__ : int = [800, 1_344]

    # COCO detection label mapping is fetched from the hub dataset repo below
    lowercase__ : Tuple = 91
    lowercase__ : Optional[int] = "huggingface/label-files"
    lowercase__ : Optional[int] = "coco-detection-id2label.json"
    lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : List[Any] = idalabel
    lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}

    return config


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """Split each timm fused qkv projection into separate q/k/v entries (in place)."""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
        lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
        lowercase__ : Dict = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowercase__ : Any = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
        lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]


def __lowerCamelCase ( lowerCamelCase__ ):
    """Translate one original state-dict key into the 🤗 naming scheme."""
    if "backbone" in name:
        lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        lowercase__ : int = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        lowercase__ : int = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        lowercase__ : int = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )

    return name


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """Rewrite every key of the original state dict; qkv keys are split per layer."""
    for key in orig_state_dict.copy().keys():
        lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
        if "qkv" in key:
            lowercase__ : Dict = key.split("." )
            lowercase__ : List[Any] = int(key_split[2] )
            lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowercase__ : str = val[:dim, :]
                lowercase__ : int = val[
                    dim : dim * 2, :
                ]
                lowercase__ : str = val[-dim:, :]
            else:
                lowercase__ : Tuple = val[:dim]
                lowercase__ : Any = val[dim : dim * 2]
                lowercase__ : Optional[Any] = val[-dim:]
        else:
            lowercase__ : Optional[Any] = val

    return orig_state_dict


def __lowerCamelCase ( ):
    """Download the standard COCO cats test image used to sanity-check outputs."""
    lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
    return im


@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """Convert an original YOLOS checkpoint, verify logits/boxes against hardcoded
    expected slices, save it, and optionally push it to the hub."""
    lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )

    # load original state_dict
    lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]

    # load 🤗 model
    lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
    model.eval()
    lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
    model.load_state_dict(lowerCamelCase__ )

    # Check outputs on an image, prepared by YolosImageProcessor
    lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
    lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
    lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
    lowercase__ : int = model(**lowerCamelCase__ )
    lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes

    # expected top-left 3x3 slices of logits / pred_boxes per checkpoint
    lowercase__ , lowercase__ : int = None, None
    if yolos_name == "yolos_ti":
        lowercase__ : Optional[int] = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        lowercase__ : Dict = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        lowercase__ : Any = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        lowercase__ : Dict = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        lowercase__ : Tuple = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : Optional[Any] = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        lowercase__ : int = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        lowercase__ : List[str] = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )

    assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )

    Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowerCamelCase__ )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowerCamelCase__ )

    if push_to_hub:
        # map checkpoint names to hub repo names under the hustvl organization
        lowercase__ : Tuple = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub..." )
        lowercase__ : Optional[int] = model_mapping[yolos_name]
        image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
        model.push_to_hub(lowerCamelCase__ , organization="hustvl" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowerCAmelCase__ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
0
"""Lazy-import shim for the Wav2Vec2-with-LM processor submodule."""
# NOTE(review): obfuscation collapsed two distinct module-level names into
# `_lowercase`; `_import_structure` in the _LazyModule call is therefore an
# unresolved reference in this copy.  Code left byte-identical.
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_lowercase = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}

if TYPE_CHECKING:
    # static type-checkers see the real import; at runtime the module is lazy
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    _lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
356
"""TensorFlow DeBERTa-v2 model tests (obfuscated copy)."""
# NOTE(review): every method below is named `lowercase__` and every local is
# `__snake_case`, so later definitions shadow earlier ones and locals
# referenced by their original names (`input_ids`, `result`, `config`, ...)
# are unbound — this module cannot run as written.  Only comments/docstrings
# are changed in this review pass.
from __future__ import annotations

import unittest

from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaVaForMaskedLM,
        TFDebertaVaForQuestionAnswering,
        TFDebertaVaForSequenceClassification,
        TFDebertaVaForTokenClassification,
        TFDebertaVaModel,
    )


class a_ :
    """Helper that builds small configs/inputs and runs per-head model checks."""

    def __init__( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : int=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=9_9 , __lowerCAmelCase : List[str]=3_2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Tuple="None" , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ):
        # all assignments below target the same obfuscated name `__snake_case`
        __snake_case = parent
        __snake_case = batch_size
        __snake_case = seq_length
        __snake_case = is_training
        __snake_case = use_input_mask
        __snake_case = use_token_type_ids
        __snake_case = use_labels
        __snake_case = vocab_size
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = max_position_embeddings
        __snake_case = type_vocab_size
        __snake_case = type_sequence_label_size
        __snake_case = initializer_range
        __snake_case = num_labels
        __snake_case = num_choices
        __snake_case = relative_attention
        __snake_case = position_biased_input
        __snake_case = pos_att_type
        __snake_case = scope

    def lowercase__ ( self : Dict ):
        """Build random ids/masks/labels and a DebertaVaConfig for the tests."""
        __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __snake_case = None
        if self.use_input_mask:
            __snake_case = random_attention_mask([self.batch_size, self.seq_length] )

        __snake_case = None
        if self.use_token_type_ids:
            __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __snake_case = None
        __snake_case = None
        __snake_case = None
        if self.use_labels:
            __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        __snake_case = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ):
        """Base model: check last_hidden_state shape for dict and list inputs."""
        __snake_case = TFDebertaVaModel(config=__lowerCAmelCase )
        __snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        __snake_case = [input_ids, input_mask]
        __snake_case = model(__lowerCAmelCase )
        __snake_case = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
        """Masked-LM head: logits shaped (batch, seq, vocab)."""
        __snake_case = TFDebertaVaForMaskedLM(config=__lowerCAmelCase )
        __snake_case = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __snake_case = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowercase__ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
        """Sequence-classification head: logits shaped (batch, num_labels)."""
        __snake_case = self.num_labels
        __snake_case = TFDebertaVaForSequenceClassification(config=__lowerCAmelCase )
        __snake_case = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __snake_case = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase__ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ):
        """Token-classification head: logits shaped (batch, seq, num_labels)."""
        __snake_case = self.num_labels
        __snake_case = TFDebertaVaForTokenClassification(config=__lowerCAmelCase )
        __snake_case = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __snake_case = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowercase__ ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ):
        """QA head: start/end logits shaped (batch, seq)."""
        __snake_case = TFDebertaVaForQuestionAnswering(config=__lowerCAmelCase )
        __snake_case = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        __snake_case = model(__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowercase__ ( self : Union[str, Any] ):
        """Repackage prepared config/inputs into the common (config, dict) form."""
        __snake_case = self.prepare_config_and_inputs()
        (
            (__snake_case) ,
            (__snake_case) ,
            (__snake_case) ,
            (__snake_case) ,
            (__snake_case) ,
            (__snake_case) ,
            (__snake_case) ,
        ) = config_and_inputs
        __snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common model-tester entry points for the TF DeBERTa-v2 heads."""
    # NOTE(review): the mixin base names were obfuscated to `UpperCAmelCase__`
    # (unresolved); upstream these are TFModelTesterMixin / PipelineTesterMixin.

    lowercase_ : List[str] = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowercase_ : Union[str, Any] = (
        {
            '''feature-extraction''': TFDebertaVaModel,
            '''fill-mask''': TFDebertaVaForMaskedLM,
            '''question-answering''': TFDebertaVaForQuestionAnswering,
            '''text-classification''': TFDebertaVaForSequenceClassification,
            '''token-classification''': TFDebertaVaForTokenClassification,
            '''zero-shot''': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowercase_ : int = False
    lowercase_ : List[Any] = False

    def lowercase__ ( self : Any ):
        __snake_case = TFDebertaVaModelTester(self )
        __snake_case = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def lowercase__ ( self : Dict ):
        self.config_tester.run_common_tests()

    def lowercase__ ( self : Optional[int] ):
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def lowercase__ ( self : Union[str, Any] ):
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )

    def lowercase__ ( self : List[Any] ):
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )

    def lowercase__ ( self : Any ):
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )

    def lowercase__ ( self : Dict ):
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )

    @slow
    def lowercase__ ( self : List[str] ):
        __snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        self.assertIsNotNone(__lowerCAmelCase )


@require_tf
class a_ ( unittest.TestCase ):
    """Integration test against a pretrained DeBERTa-v2 checkpoint."""

    @unittest.skip(reason='Model not available yet' )
    def lowercase__ ( self : List[str] ):
        pass

    @slow
    def lowercase__ ( self : int ):
        """Compare a 3x3 hidden-state slice to hardcoded expected values."""
        __snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        __snake_case = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        __snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]

        __snake_case = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 )
356
1
# ALIGN processor tests (obfuscated copy).
# NOTE(review): every method is named `_a` and every local is rewritten to
# `UpperCamelCase__` / `SCREAMING_SNAKE_CASE_`, so later defs shadow earlier
# ones and many references (`vocab_tokens`, `self.vocab_file`, `processor`,
# `tokenizer`, ...) are unbound — the module cannot run as written.  Only
# comments/docstrings are changed in this review pass.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class _A ( unittest.TestCase ):
    def _a (self ) -> Dict:
        """Create a temp dir holding a toy BERT vocab and an image-processor config."""
        UpperCamelCase__ = tempfile.mkdtemp()

        UpperCamelCase__ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        UpperCamelCase__ = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        UpperCamelCase__ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def _a (self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
        """Slow tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
        """Fast tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
        """Image processor loaded from the temp dir."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _a (self ) -> List[str]:
        """Remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def _a (self ) -> List[Any]:
        """One random 3x30x400 image as a PIL Image (note: `np.uinta` is mangled `np.uint8`)."""
        UpperCamelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]

        UpperCamelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def _a (self ) -> Union[str, Any]:
        """Round-trip save/load of the processor with slow and fast tokenizers."""
        UpperCamelCase__ = self.get_tokenizer()
        UpperCamelCase__ = self.get_rust_tokenizer()
        UpperCamelCase__ = self.get_image_processor()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        processor_slow.save_pretrained(self.tmpdirname )
        UpperCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        processor_fast.save_pretrained(self.tmpdirname )
        UpperCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
        self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
        self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )

    def _a (self ) -> Optional[int]:
        """from_pretrained with extra kwargs overrides tokenizer/image-processor settings."""
        UpperCamelCase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        UpperCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        UpperCamelCase__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )

        UpperCamelCase__ = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )

    def _a (self ) -> Tuple:
        """Processor(images=...) matches the bare image processor output."""
        UpperCamelCase__ = self.get_image_processor()
        UpperCamelCase__ = self.get_tokenizer()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = self.prepare_image_inputs()

        UpperCamelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
        UpperCamelCase__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def _a (self ) -> Tuple:
        """Processor(text=...) matches the bare tokenizer with max_length padding."""
        UpperCamelCase__ = self.get_image_processor()
        UpperCamelCase__ = self.get_tokenizer()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = '''lower newer'''

        UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=64 )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _a (self ) -> Optional[int]:
        """text+images produce the full key set; calling with nothing raises."""
        UpperCamelCase__ = self.get_image_processor()
        UpperCamelCase__ = self.get_tokenizer()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = '''lower newer'''
        UpperCamelCase__ = self.prepare_image_inputs()

        UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )

        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with pytest.raises(SCREAMING_SNAKE_CASE_ ):
            processor()

    def _a (self ) -> Dict:
        """batch_decode forwards to the tokenizer."""
        UpperCamelCase__ = self.get_image_processor()
        UpperCamelCase__ = self.get_tokenizer()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        UpperCamelCase__ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )

        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def _a (self ) -> str:
        """Output keys equal processor.model_input_names."""
        UpperCamelCase__ = self.get_image_processor()
        UpperCamelCase__ = self.get_tokenizer()

        UpperCamelCase__ = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )

        UpperCamelCase__ = '''lower newer'''
        UpperCamelCase__ = self.prepare_image_inputs()

        UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
720
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def __UpperCamelCase ( A ): UpperCamelCase__ , UpperCamelCase__ = analyze_text(A ) UpperCamelCase__ = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. UpperCamelCase__ = sum(single_char_strings.values() ) # one length string UpperCamelCase__ = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: UpperCamelCase__ = single_char_strings[ch] UpperCamelCase__ = my_str / all_sum my_fir_sum += prob * math.loga(A ) # entropy formula. # print entropy print(f"{round(-1 * my_fir_sum ):.1f}" ) # two len string UpperCamelCase__ = sum(two_char_strings.values() ) UpperCamelCase__ = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: UpperCamelCase__ = cha + cha if sequence in two_char_strings: UpperCamelCase__ = two_char_strings[sequence] UpperCamelCase__ = int(A ) / all_sum my_sec_sum += prob * math.loga(A ) # print second entropy print(f"{round(-1 * my_sec_sum ):.1f}" ) # print the difference between them print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" ) def __UpperCamelCase ( A ): UpperCamelCase__ = Counter() # type: ignore UpperCamelCase__ = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(A ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def __UpperCamelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. 
Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
469
0
"""Convert a fairseq mBART checkpoint on disk into a 🤗 MBartForConditionalGeneration."""
# Defects fixed vs. original: all three functions were named `__lowerCAmelCase`
# while the call sites used `remove_ignore_keys_`, `make_linear_from_emb` and
# `convert_fairseq_mbart_checkpoint_from_disk` (NameError); `state_dict.pop(a_, a_)`
# and `nn.Linear(a_, a_, bias=a_)` had mangled arguments; and the main block read
# `args.mbart_aa` although argparse registers `--mbart_50`.
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place (missing keys are ignored)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free Linear layer sharing its weight data with embedding *emb*."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return an equivalent 🤗 model.

    :param checkpoint_path: path to the fairseq ``model.pt`` file.
    :param hf_config_path: hub id of the 🤗 config to start from.
    :param finetuned: if True, tie ``lm_head`` to the shared embeddings.
    :param mbart_50: if True (together with ``finetuned``), use ReLU activation
        as in the mBART-50 checkpoints.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    # vocab size is read off the checkpoint itself so extended vocabularies work
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    # fairseq stores no separate shared embedding; reuse the decoder's
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
251
"""Tests for the M2M100 tokenizer (sentencepiece-based multilingual tokenizer)."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

# Language-code token ids in the facebook/m2m100_418M vocabulary.
EN_CODE = 128_022
FR_CODE = 128_028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Common tokenizer-API tests, run against a tiny local sentencepiece model."""

    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Minimal vocabulary paired with the fixture sentencepiece model.
        vocab = ["</s>", "<unk>", "\u2581This", "\u2581is", "\u2581a", "\u2581t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        # Round-trip through save_pretrained so every tokenizer file exists on disk.
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """`</s>` maps to id 0 and back again."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # Reference ids produced by the facebook/m2m100_418M checkpoint.
        # Every row is padded on the right to 108 tokens with pad_token_id == 1;
        # the attention mask is 1 over real tokens and 0 over padding.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2],
                [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2] + [1] * 67,
                [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2] + [1] * 92,
            ],
            "attention_mask": [
                [1] * 108,
                [1] * 41 + [0] * 67,
                [1] * 16 + [0] * 92,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    """Integration tests against the real facebook/m2m100_418M checkpoint."""

    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de r\u00e9ponse de la part du gouvernement fran\u00e7ais.",
        "L'affaire NSA souligne l'absence totale de d\u00e9bat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_decode_ignores_language_codes(self):
        """Decoding with skip_special_tokens must drop the leading language code."""
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        """Setting src_lang updates prefix/suffix special tokens."""
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        """Switching to target mode uses tgt_lang; input mode restores src_lang."""
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
251
1
"""Benchmark peak GPU memory while fine-tuning a sequence-classification model
on a slice of GLUE/MRPC with Hugging Face Accelerate (optionally under DeepSpeed)."""
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert a byte count to whole megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that records CUDA memory allocated on entry (`begin`),
    on exit (`end`/`used`), and the peak reached inside the block (`peak`/`peaked`),
    with `used`/`peaked` expressed in MB relative to `begin`."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Build train/eval dataloaders over the first `n_train`/`n_val` MRPC examples.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for `config['num_epochs']` epochs, tracking peak GPU memory per epoch
    and writing the per-epoch totals to `<output_dir>/peak_memory_utilization.json`."""
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: real warmup schedule unless DeepSpeed supplies its own.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch the benchmark."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
700
"""Unit tests for `accelerate.Accelerator`: preparation bookkeeping, shared state,
checkpoint save/load hooks, and bitsandbytes (8-bit) model handling."""
import json
import os
import tempfile
from unittest.mock import patch

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment


def create_components():
    """Build a tiny model/optimizer/scheduler/dataloader quintet for prepare() tests."""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """Cheap scalar fingerprint of a linear model's parameters."""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """Overwrite `model`'s parameters with a freshly initialized layer of the same shape."""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        # Re-instantiating with a conflicting device choice must fail.
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        """GradientState mirrors settings made through the Accelerator."""
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """ACCELERATE_TORCH_DEVICE overrides the device, even one that does not exist."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """prepare() must pass a `None` argument through untouched."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """An 8-bit model already placed on a single device can be prepared."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model with CPU-offloaded modules must raise."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model sharded across GPUs must raise under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        # NOTE(review): forcing distributed state for this test — confirm this matches
        # how PartialState expects its shared state to be seeded.
        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device_no_distributed(self):
        """Without a distributed setup, a multi-device 8-bit model can be prepared."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
68
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _A : List[str] = logging.get_logger(__name__) _A : List[Any] = { """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class a__ ( a_ ): __lowerCAmelCase = """sew""" def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a=2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.0_2 , _a=1E-5 , _a="group" , _a="gelu" , _a=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=128 , _a=16 , _a=True , _a=0.0_5 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a="mean" , _a=False , _a=False , _a=256 , _a=0 , _a=1 , _a=2 , **_a , ): super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowercase : Union[str, Any] = hidden_size lowercase : Dict = feat_extract_norm lowercase : Any = feat_extract_activation lowercase : List[Any] = list(_a ) lowercase : Optional[int] = list(_a ) lowercase : Optional[Any] = list(_a ) lowercase : Dict = conv_bias lowercase : List[str] = num_conv_pos_embeddings lowercase : str = num_conv_pos_embedding_groups lowercase : Any = len(self.conv_dim ) lowercase : int = num_hidden_layers lowercase : Any = intermediate_size lowercase : Optional[Any] = squeeze_factor lowercase : str = hidden_act lowercase : Union[str, Any] = num_attention_heads lowercase : Optional[Any] = hidden_dropout lowercase : Union[str, Any] = attention_dropout lowercase : Any = activation_dropout lowercase : Union[str, Any] = feat_proj_dropout lowercase : Union[str, Any] = final_dropout lowercase : int = layerdrop lowercase : Dict = layer_norm_eps lowercase : Any = initializer_range lowercase : Tuple = vocab_size if ( (len(self.conv_stride ) != 
self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase : int = apply_spec_augment lowercase : int = mask_time_prob lowercase : Optional[int] = mask_time_length lowercase : int = mask_time_min_masks lowercase : str = mask_feature_prob lowercase : Dict = mask_feature_length lowercase : Any = mask_feature_min_masks # ctc loss lowercase : str = ctc_loss_reduction lowercase : Union[str, Any] = ctc_zero_infinity # sequence classification lowercase : List[str] = use_weighted_layer_sum lowercase : Optional[int] = classifier_proj_size @property def __magic_name__ ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
361
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A : str = logging.get_logger(__name__) _A : Tuple = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class a__ ( a_ ): __lowerCAmelCase = """big_bird""" def __init__( self , _a=50_358 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=4_096 , _a=2 , _a=0.0_2 , _a=1E-12 , _a=True , _a=0 , _a=1 , _a=2 , _a=66 , _a="block_sparse" , _a=True , _a=False , _a=64 , _a=3 , _a=None , **_a , ): super().__init__( pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , sep_token_id=_a , **_a , ) lowercase : List[Any] = vocab_size lowercase : Optional[Any] = max_position_embeddings lowercase : str = hidden_size lowercase : int = num_hidden_layers lowercase : Optional[Any] = num_attention_heads lowercase : List[Any] = intermediate_size lowercase : int = hidden_act lowercase : str = hidden_dropout_prob lowercase : List[Any] = attention_probs_dropout_prob lowercase : Tuple = initializer_range lowercase : Optional[int] = type_vocab_size lowercase : str = layer_norm_eps lowercase : Tuple = use_cache lowercase : Any = rescale_embeddings lowercase : List[str] = attention_type lowercase : int = use_bias lowercase : Dict = block_size lowercase : List[str] = num_random_blocks lowercase : int = classifier_dropout class a__ ( a_ ): @property def __magic_name__ ( self ): if self.task == "multiple-choice": lowercase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: lowercase 
: Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
361
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = """▁""" __snake_case = {"""vocab_file""": """sentencepiece.bpe.model"""} __snake_case = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model""" ), } } __snake_case = { """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off __snake_case = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", 
"""kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : str = VOCAB_FILES_NAMES 
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask'''] __UpperCAmelCase : List[int] = [] __UpperCAmelCase : List[int] = [] def __init__( self , UpperCamelCase__ , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__=None , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Any: '''simple docstring''' snake_case : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token snake_case : str = {} if sp_model_kwargs is None else sp_model_kwargs snake_case : List[str] = legacy_behaviour super().__init__( bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase__ , **UpperCamelCase__ , ) snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase__ ) ) snake_case : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id 
alignment for the first 4 token snake_case : Tuple = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case : List[Any] = 1 snake_case : List[str] = len(self.sp_model ) snake_case : Dict = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__ ) } snake_case : int = {v: k for k, v in self.lang_code_to_id.items()} snake_case : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) snake_case : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} snake_case : Tuple = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) snake_case : str = src_lang if src_lang is not None else "eng_Latn" snake_case : Tuple = self.lang_code_to_id[self._src_lang] snake_case : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Tuple: '''simple docstring''' snake_case : str = self.__dict__.copy() snake_case : Dict = None snake_case : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): snake_case : List[Any] = {} snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowerCamelCase ( self ) -> str: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCamelCase ( self ) -> str: '''simple docstring''' return self._src_lang 
@src_lang.setter def lowerCamelCase ( self , UpperCamelCase__ ) -> None: '''simple docstring''' snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) snake_case : int = [1] * len(self.prefix_tokens ) snake_case : Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: '''simple docstring''' snake_case : Union[str, Any] = [self.sep_token_id] snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) snake_case : Optional[Any] = src_lang snake_case : str = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) snake_case : Optional[int] = 
self.convert_tokens_to_ids(UpperCamelCase__ ) snake_case : str = tgt_lang_id return inputs def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]: '''simple docstring''' return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case : Union[str, Any] = self.sp_model.PieceToId(UpperCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' snake_case : str = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCamelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return snake_case : Tuple = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: snake_case : Tuple = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def lowerCamelCase ( self 
, UpperCamelCase__ , UpperCamelCase__ = "eng_Latn" , UpperCamelCase__ = None , UpperCamelCase__ = "fra_Latn" , **UpperCamelCase__ , ) -> BatchEncoding: '''simple docstring''' snake_case : Any = src_lang snake_case : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase ( self , UpperCamelCase__ ) -> None: '''simple docstring''' snake_case : Optional[int] = self.lang_code_to_id[src_lang] if self.legacy_behaviour: snake_case : str = [] snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code] else: snake_case : List[str] = [self.cur_lang_code] snake_case : Optional[int] = [self.eos_token_id] def lowerCamelCase ( self , UpperCamelCase__ ) -> None: '''simple docstring''' snake_case : List[Any] = self.lang_code_to_id[lang] if self.legacy_behaviour: snake_case : Optional[Any] = [] snake_case : Any = [self.eos_token_id, self.cur_lang_code] else: snake_case : Dict = [self.cur_lang_code] snake_case : str = [self.eos_token_id]
117
"""simple docstring""" from __future__ import annotations from cmath import sqrt def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : int ) -> tuple[complex, complex]: """simple docstring""" if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) snake_case : Dict = b * b - 4 * a * c snake_case : Tuple = (-b + sqrt(lowercase )) / (2 * a) snake_case : Optional[int] = (-b - sqrt(lowercase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def __lowerCAmelCase ( ) -> List[Any]: """simple docstring""" snake_case ,snake_case : Optional[Any] = quadratic_roots(a=5 , b=6 , c=1 ) print(F'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
117
1
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): def wrapper(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Union[str, Any] = timeit.default_timer() __lowerCamelCase : Optional[Any] = func(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : List[str] = timeit.default_timer() - starttime return delta __lowerCamelCase : Any = func.__name__ return wrapper def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=None ): __lowerCamelCase : Any = [] __lowerCamelCase : List[Any] = seq_shapes or {} for i in range(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[int] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(SCREAMING_SNAKE_CASE__ , _ArrayXD ): __lowerCamelCase : str = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Value ): if v.dtype == "string": __lowerCamelCase : List[str] = 'The small grey turtle was surprisingly fast when challenged.' 
else: __lowerCamelCase : Dict = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ): while isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ): __lowerCamelCase : Tuple = v.feature __lowerCamelCase : Union[str, Any] = seq_shapes[k] __lowerCamelCase : Union[str, Any] = np.random.rand(*SCREAMING_SNAKE_CASE__ ).astype(v.dtype ) __lowerCamelCase : Optional[Any] = data dummy_data.append((i, example) ) return dummy_data def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=None ): __lowerCamelCase : Union[str, Any] = generate_examples(SCREAMING_SNAKE_CASE__ , num_examples=SCREAMING_SNAKE_CASE__ , seq_shapes=SCREAMING_SNAKE_CASE__ ) with ArrowWriter(features=SCREAMING_SNAKE_CASE__ , path=SCREAMING_SNAKE_CASE__ ) as writer: for key, record in dummy_data: __lowerCamelCase : str = features.encode_example(SCREAMING_SNAKE_CASE__ ) writer.write(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : Tuple = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' ) __lowerCamelCase : Optional[Any] = datasets.Dataset.from_file(filename=SCREAMING_SNAKE_CASE__ , info=datasets.DatasetInfo(features=SCREAMING_SNAKE_CASE__ ) ) return dataset
669
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class A_ ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __snake_case = 1 @register_to_config def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ): __lowerCamelCase : Dict = None __lowerCamelCase : Any = None __lowerCamelCase : Optional[int] = None def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ): __lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a ) def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score __lowerCamelCase : Tuple = ( -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) __lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) __lowerCamelCase : Optional[Any] = std.flatten() while len(std.shape ) < len(score.shape ): __lowerCamelCase : List[str] = std.unsqueeze(-1 ) __lowerCamelCase : Any = -score / std # compute __lowerCamelCase : List[Any] = -1.0 / len(self.timesteps ) __lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) __lowerCamelCase : Dict = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): __lowerCamelCase : int = beta_t.unsqueeze(-1 ) __lowerCamelCase : Any = -0.5 * beta_t * x __lowerCamelCase : List[Any] = torch.sqrt(a ) __lowerCamelCase : Tuple = drift - diffusion**2 * score __lowerCamelCase : str = x + drift * dt # add noise __lowerCamelCase : Any = 
randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype ) __lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self: Optional[int] ): return self.config.num_train_timesteps
669
1
from torch import nn def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
643
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

# NOTE(review): every module constant below is assigned to the *same* name, so
# each assignment clobbers the previous one and only the last value survives.
# The class body later reads VOCAB_FILES_NAMES, PRETRAINED_INIT_CONFIGURATION,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP and
# RESOURCE_FILES_NAMES, none of which exist under those names here --
# mechanical renaming damage; restore the original constant names.
__UpperCAmelCase : Any = logging.get_logger(__name__)  # module logger
__UpperCAmelCase : int = "▁"  # SentencePiece word-boundary marker (SPIECE_UNDERLINE)
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
__UpperCAmelCase : str = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class UpperCAmelCase_(_a):
    """SentencePiece-backed ERNIE-M tokenizer (mimics paddlenlp's
    ErnieMTokenizer).

    NOTE(review): this class is broken by mechanical renaming and kept
    byte-for-byte for review:
    * the base class was renamed to the undefined `_a` (presumably
      PreTrainedTokenizer, which IS imported above);
    * all class attributes share one name, so only the last assignment
      survives;
    * every helper method is named `_lowercase`, so only the last definition
      of each overload set is callable;
    * several signatures repeat the parameter name `__SCREAMING_SNAKE_CASE`
      and several tuple targets carry annotations -- both are SyntaxErrors;
    * locals are bound as `UpperCamelCase` while later lines read the intended
      names (`self.vocab`, `split_tokens`, `offset`, ...).
    Comments below document the *intended* behaviour of each method.
    """

    __UpperCamelCase : List[str] = ["input_ids"]  # model_input_names
    __UpperCamelCase : List[str] = VOCAB_FILES_NAMES
    __UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : List[str] = RESOURCE_FILES_NAMES

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE,               # sentencepiece_model_ckpt
        __SCREAMING_SNAKE_CASE=None,          # vocab_file
        __SCREAMING_SNAKE_CASE=False,         # do_lower_case
        __SCREAMING_SNAKE_CASE="utf8",        # encoding
        __SCREAMING_SNAKE_CASE="[UNK]",       # unk_token
        __SCREAMING_SNAKE_CASE="[SEP]",       # sep_token
        __SCREAMING_SNAKE_CASE="[PAD]",       # pad_token
        __SCREAMING_SNAKE_CASE="[CLS]",       # cls_token
        __SCREAMING_SNAKE_CASE="[MASK]",      # mask_token
        __SCREAMING_SNAKE_CASE=None,          # sp_model_kwargs
        **__SCREAMING_SNAKE_CASE,
    ):
        """Load the SentencePiece model and (optionally) an external vocab."""
        UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__SCREAMING_SNAKE_CASE,
            unk_token=__SCREAMING_SNAKE_CASE,
            sep_token=__SCREAMING_SNAKE_CASE,
            pad_token=__SCREAMING_SNAKE_CASE,
            cls_token=__SCREAMING_SNAKE_CASE,
            mask_token=__SCREAMING_SNAKE_CASE,
            vocab_file=__SCREAMING_SNAKE_CASE,
            encoding=__SCREAMING_SNAKE_CASE,
            sp_model_kwargs=self.sp_model_kwargs,
            **__SCREAMING_SNAKE_CASE,
        )
        UpperCamelCase : List[str] = do_lower_case
        UpperCamelCase : Dict = sentencepiece_model_ckpt
        UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(__SCREAMING_SNAKE_CASE)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE)
        else:
            UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE): id for id in range(self.sp_model.get_piece_size())}
        UpperCamelCase : str = {v: k for k, v in self.vocab.items()}

    # intended: get_offset_mapping(text) -> list of (start, end) char spans per token
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        """Map each produced token back to its character span in *text*."""
        if text is None:
            return None
        UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE)
        UpperCamelCase, UpperCamelCase : str = '''''', []
        # NFKC-normalize and drop whitespace, remembering original indices
        for i, ch in enumerate(__SCREAMING_SNAKE_CASE):
            if ch in self.SP_CHAR_MAPPING:
                UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE)
            else:
                UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''', __SCREAMING_SNAKE_CASE)
            if self.is_whitespace(__SCREAMING_SNAKE_CASE):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE))
        UpperCamelCase, UpperCamelCase, UpperCamelCase : Tuple = normalized_text, [], 0
        if self.do_lower_case:
            UpperCamelCase : Tuple = text.lower()
        # locate each token (minus the ▁ prefix) in the normalized text
        for token in split_tokens:
            if token[:1] == "▁":
                UpperCamelCase : Any = token[1:]
            UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE) + offset
            UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            UpperCamelCase : str = end
        return token_mapping

    @property
    def _lowercase(self):
        """intended: vocab_size"""
        return len(self.vocab)

    def _lowercase(self):
        """intended: get_vocab -- vocab merged with added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        """Drop the unpicklable SentencePiece processor when pickling."""
        UpperCamelCase : Optional[Any] = self.__dict__.copy()
        UpperCamelCase : str = None
        return state

    def __setstate__(self, __SCREAMING_SNAKE_CASE):
        """Restore state and reload the SentencePiece model from disk."""
        UpperCamelCase : Tuple = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    # intended: clean_text -- apply SP_CHAR_MAPPING per character
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE) for c in text))

    # intended: _tokenize(text, enable_sampling=False, nbest_size=64, alpha=0.1)
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=False, __SCREAMING_SNAKE_CASE=64, __SCREAMING_SNAKE_CASE=0.1):
        """Encode with SentencePiece, then re-split pieces around CJK chars,
        punctuation and digit/non-digit boundaries."""
        if self.sp_model_kwargs.get('''enable_sampling''') is True:
            UpperCamelCase : List[str] = True
        if self.sp_model_kwargs.get('''alpha''') is not None:
            UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''')
        if self.sp_model_kwargs.get('''nbest_size''') is not None:
            UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''')
        if not enable_sampling:
            UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE)
        else:
            UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        UpperCamelCase : List[str] = []
        for pi, piece in enumerate(__SCREAMING_SNAKE_CASE):
            if piece == SPIECE_UNDERLINE:
                # keep a lone ▁ only when the next piece does not start with one
                if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE) and pi != 0:
                    new_pieces.append(__SCREAMING_SNAKE_CASE)
                    continue
                else:
                    continue
            UpperCamelCase : Any = 0  # intended: lst_i, start of the current sub-chunk
            for i, chunk in enumerate(__SCREAMING_SNAKE_CASE):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(__SCREAMING_SNAKE_CASE) or self.is_punct(__SCREAMING_SNAKE_CASE):
                    # CJK chars and punctuation become standalone pieces
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(__SCREAMING_SNAKE_CASE)
                    UpperCamelCase : Dict = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    # split at letter->digit boundary
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    UpperCamelCase : Union[str, Any] = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    # split at digit->letter boundary
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    UpperCamelCase : Any = i
            if len(__SCREAMING_SNAKE_CASE) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    # intended: convert_tokens_to_string
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE).replace(__SCREAMING_SNAKE_CASE, ''' ''').strip()
        return out_string

    # intended: convert_ids_to_string
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
        UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE).replace(__SCREAMING_SNAKE_CASE, ''' ''').strip()
        return out_string

    # intended: _convert_token_to_id
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        return self.vocab.get(__SCREAMING_SNAKE_CASE, self.vocab.get(self.unk_token))

    # intended: _convert_id_to_token
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE, self.unk_token)

    # intended: build_inputs_with_special_tokens
    # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] [SEP] B [SEP]
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase : Any = [self.cls_token_id]
        UpperCamelCase : str = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep

    # intended: build_offset_mapping_with_special_tokens -- (0, 0) for specials
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]

    # intended: get_special_tokens_mask
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None, __SCREAMING_SNAKE_CASE=False):
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
        return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]

    # intended: create_token_type_ids_from_sequences
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(__SCREAMING_SNAKE_CASE) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(__SCREAMING_SNAKE_CASE) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE) + 3)

    # intended: is_ch_char -- CJK unified ideograph range
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    # intended: is_alpha -- ASCII letter
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    # intended: is_punct -- ASCII and fullwidth CJK punctuation
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    # intended: is_whitespace -- ASCII whitespace or Unicode category Zs
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(__SCREAMING_SNAKE_CASE) == 1:
            UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE)
            if cat == "Zs":
                return True
        return False

    # intended: load_vocab(filepath) -> {token: index}
    def _lowercase(self, __SCREAMING_SNAKE_CASE):
        UpperCamelCase : int = {}
        with io.open(__SCREAMING_SNAKE_CASE, '''r''', encoding='''utf-8''') as f:
            for index, line in enumerate(__SCREAMING_SNAKE_CASE):
                UpperCamelCase : Tuple = line.rstrip('''\n''')
                UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE)
        return token_to_idx

    # intended: save_vocabulary(save_directory, filename_prefix=None)
    def _lowercase(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=None):
        """Write vocab.txt (sorted by index) and the serialized SP model."""
        UpperCamelCase : Union[str, Any] = 0
        if os.path.isdir(__SCREAMING_SNAKE_CASE):
            UpperCamelCase : Dict = os.path.join(
                __SCREAMING_SNAKE_CASE, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
            )
        else:
            UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(__SCREAMING_SNAKE_CASE, '''w''', encoding='''utf-8''') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda __SCREAMING_SNAKE_CASE: kv[1]):
                if index != token_index:
                    # holes in the index sequence mean a corrupted vocabulary
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!'''
                    )
                    UpperCamelCase : List[Any] = token_index
                writer.write(token + '''\n''')
                index += 1
        UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE, '''sentencepiece.bpe.model''')
        with open(__SCREAMING_SNAKE_CASE, '''wb''') as fi:
            UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
            fi.write(__SCREAMING_SNAKE_CASE)
        return (vocab_file,)
643
1
"""simple docstring""" from manim import * class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase : int = Rectangle(height=0.25 , width=0.25 ) UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )] UpperCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )] UpperCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : List[str] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : Union[str, Any] = Text("""CPU""" , font_size=24 ) UpperCAmelCase : Tuple = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )] UpperCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : List[Any] = Text("""GPU""" , font_size=24 ) UpperCAmelCase : Any = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) gpu.move_to([-1, -1, 0] ) self.add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )] UpperCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : Dict = Text("""Model""" , font_size=24 ) UpperCAmelCase : Any = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) model.move_to([3, -1.0, 0] ) 
self.add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = [] UpperCAmelCase : Any = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : Any = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.8 ) target.move_to(_SCREAMING_SNAKE_CASE ) model_arr.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_SCREAMING_SNAKE_CASE ) self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = [meta_mem.copy() for i in range(6 )] UpperCAmelCase : Tuple = [meta_mem.copy() for i in range(6 )] UpperCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : Dict = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : str = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 ) UpperCAmelCase : Optional[int] = Text("""Disk""" , font_size=24 ) UpperCAmelCase : Optional[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE ) disk.move_to([-4, -1.25, 0] ) self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase : Optional[int] = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = MarkupText( F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = MarkupText( F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Any = Square(0.3 ) input.set_fill(_SCREAMING_SNAKE_CASE , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _SCREAMING_SNAKE_CASE , buff=0.5 ) self.play(Write(_SCREAMING_SNAKE_CASE ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.02 ) self.play(MoveToTarget(_SCREAMING_SNAKE_CASE ) ) self.play(FadeOut(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : List[Any] = Arrow(start=_SCREAMING_SNAKE_CASE , end=_SCREAMING_SNAKE_CASE , color=_SCREAMING_SNAKE_CASE , buff=0.5 ) a.next_to(model_arr[0].get_left() , _SCREAMING_SNAKE_CASE , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) UpperCAmelCase : Any = MarkupText( F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) ) UpperCAmelCase : int = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02} self.play( Write(_SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) UpperCAmelCase : Dict = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _SCREAMING_SNAKE_CASE , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) UpperCAmelCase : Union[str, Any] = AnimationGroup( FadeOut(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_SCREAMING_SNAKE_CASE ) model_cpu_arr[i].generate_target() 
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: UpperCAmelCase : str = 0.7 self.play( Circumscribe(model_arr[i] , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) UpperCAmelCase : List[Any] = a_c UpperCAmelCase : int = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_SCREAMING_SNAKE_CASE ) , FadeOut(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , ) UpperCAmelCase : Any = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(_SCREAMING_SNAKE_CASE ) ) self.wait()
160
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path A: Optional[int] = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def _snake_case ( UpperCamelCase : List[str]=True ): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=UpperCAmelCase__ ) ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Tuple = None def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' with TemporaryDirectory() as 
tmp_dir: UpperCAmelCase : str = dataset_module_factory(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path , dataset=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE , hash=dataset_module.hash , ) UpperCAmelCase : Optional[int] = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=_SCREAMING_SNAKE_CASE ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) UpperCAmelCase : Tuple = cached_path(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) ) @pytest.mark.integration def _snake_case ( UpperCamelCase : int ): UpperCAmelCase : str = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" UpperCAmelCase : Optional[int] = dataset_module_factory("""wikipedia""" , cache_dir=UpperCamelCase ) UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam UpperCAmelCase : List[str] = None builder_instance.download_and_prepare() UpperCAmelCase : List[Any] = builder_instance.as_dataset() assert ds @pytest.mark.integration def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : List[str] = dataset_module_factory("""wikipedia""" , cache_dir=UpperCamelCase ) UpperCAmelCase : Optional[Any] = import_main_class(dataset_module.module_path , dataset=UpperCamelCase ) UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , ) UpperCAmelCase : Any = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCamelCase , UpperCamelCase ) assert "train" in ds assert 
isinstance(ds["""train"""] , UpperCamelCase ) assert next(iter(ds["""train"""] ) )
160
1
"""pytest configuration shared by the test suite.

Restored from a mechanically-garbled original, which had three defects:
* the repo path was assigned to one name but inserted into ``sys.path`` under
  the undefined name ``git_repo_path``;
* both pytest hooks were defined under the same name ``lowerCamelCase`` (the
  second definition shadowed the first, and pytest only discovers hooks by
  their canonical names, which the imported ``pytest_addoption_shared`` /
  ``pytest_terminal_summary_main`` helpers identify);
* the second hook read the undefined ``terminalreporter`` and passed its own
  parameter as the report ``id`` instead of the ``--make-reports`` value.
"""
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Register the shared transformers test-suite CLI options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extra test reports when ``--make-reports`` is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
700
'''simple docstring''' def lowerCamelCase ( _snake_case : List[str] ): # noqa: E741 '''simple docstring''' lowercase__ = len(_snake_case ) lowercase__ = 0 lowercase__ = [0] * n lowercase__ = [False] * n lowercase__ = [False] * n def dfs(_snake_case : Tuple ,_snake_case : int ,_snake_case : List[str] ,_snake_case : str ): if parent == root: out_edge_count += 1 lowercase__ = True lowercase__ = at for to in l[at]: if to == parent: pass elif not visited[to]: lowercase__ = dfs(_snake_case ,_snake_case ,_snake_case ,_snake_case ) lowercase__ = min(low[at] ,low[to] ) # AP found via bridge if at < low[to]: lowercase__ = True # AP found via cycle if at == low[to]: lowercase__ = True else: lowercase__ = min(low[at] ,_snake_case ) return out_edge_count for i in range(_snake_case ): if not visited[i]: lowercase__ = 0 lowercase__ = dfs(_snake_case ,_snake_case ,-1 ,_snake_case ) lowercase__ = out_edge_count > 1 for x in range(len(_snake_case ) ): if is_art[x] is True: print(_snake_case ) # Adjacency list of graph SCREAMING_SNAKE_CASE__ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
539
0
# Package initializer for the schedulers subpackage: re-export every scheduler
# class, substituting dummy placeholder objects when an optional backend
# (torch / flax / scipy / torchsde) is not installed so imports never fail.
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)

# PyTorch-backed schedulers (the default set)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# Flax-backed schedulers
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# Schedulers needing both torch and scipy
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# Schedulers needing both torch and torchsde
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
36
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration for a composite vision-encoder / text-decoder model.

    Stores two sub-configurations (``encoder`` and ``decoder``) that are
    materialized through ``AutoConfig.for_model`` from the ``model_type``
    key embedded in each sub-config dict.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        """Build the composite config.

        Raises:
            ValueError: if either the ``encoder`` or ``decoder`` sub-config
                is missing from ``kwargs``.
        """
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        # Each sub-config dict carries its own "model_type" key; pop it so the
        # remaining keys can be forwarded as plain config kwargs.
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "VisionEncoderDecoderConfig":
        """Instantiate from two already-built sub-configs.

        Forces decoder flags so cross-attention layers are created.
        """
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder half (pixel inputs)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the image tensor fed to the encoder.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX vs. framework outputs.
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder half (token inputs + encoder states)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Create dummy decoder inputs, substituting zero encoder states.

        The encoder hidden-state tensor is shaped from the dummy input_ids
        batch/sequence plus ``self._config.encoder_hidden_size``.
        """
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """Dispatcher that hands out per-half ONNX configs for the composite model."""

    @property
    def inputs(self) -> None:
        # The composite model itself is not exported directly; the encoder and
        # decoder halves are exported via the configs returned below.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Return the ONNX config for the vision encoder."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """Return the ONNX config for the decoder, wiring in the encoder width."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
36
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it provides.
# Optional backends (tokenizers / torch / tf / flax) only register their
# submodules when the corresponding dependency is importable.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime the module is lazy.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin _A : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class _lowercase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaTokenizer _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : Tuple = True def a ( self : Any ) -> Any: super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = SpeechTaTokenizer(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = AddedToken("""<mask>""" , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: __lowerCAmelCase = """this is a test""" __lowerCAmelCase = """this is a test""" return input_text, output_text def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=20 , SCREAMING_SNAKE_CASE__ : List[str]=5 ) -> str: __lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def a ( self : Dict ) -> Union[str, Any]: __lowerCAmelCase = """<pad>""" __lowerCAmelCase = 1 
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def a ( self : Dict ) -> List[Any]: __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 81 ) def a ( self : Optional[Any] ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def a ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCAmelCase = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCAmelCase = tokenizer.vocab_size __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __lowerCAmelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] __lowerCAmelCase = tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.vocab_size __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size + len(SCREAMING_SNAKE_CASE__ ) ) __lowerCAmelCase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) 
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __lowerCAmelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} __lowerCAmelCase = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = tokenizer.vocab_size __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size_a + len(SCREAMING_SNAKE_CASE__ ) ) __lowerCAmelCase = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def a ( self : Tuple ) -> Union[str, Any]: pass def a ( self : Optional[Any] ) -> List[str]: pass def a ( self : Optional[Any] ) -> str: __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) __lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", 
"""n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) __lowerCAmelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on __lowerCAmelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def a ( self : Optional[Any] ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. __lowerCAmelCase = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off __lowerCAmelCase = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 
10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=SCREAMING_SNAKE_CASE__ , )
330
1
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) A__ = str(bin(UpperCAmelCase_ ) )[2:] # remove the leading "0b" A__ = str(bin(UpperCAmelCase_ ) )[2:] # remove the leading "0b" A__ = max(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase_ ), b_binary.zfill(UpperCAmelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
104
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCamelCase = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } 
@lru_cache()
def bytes_to_unicode():
    """
    Return a mapping from utf-8 byte values (0-255) to printable unicode strings.

    Byte-level BPE operates on bytes, but merge tables are keyed by unicode
    strings, so every byte needs a printable, non-whitespace stand-in.
    Printable latin-1 bytes map to themselves; the remaining bytes are shifted
    into an unused unicode range starting at 256.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbol strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """
    BART tokenizer using byte-level Byte-Pair-Encoding (same algorithm as GPT-2
    and RoBERTa).  Treats spaces as parts of tokens, so a word is encoded
    differently at the start of a sentence (no preceding space) than elsewhere.

    Args:
        vocab_file: path to the vocabulary (json) file.
        merges_file: path to the BPE merges (txt) file.
        errors: how to handle utf-8 decoding errors (passed to `bytes.decode`).
        bos/eos/sep/cls/unk/pad/mask_token: special tokens.
        add_prefix_space: whether to prepend a space to the input (so the first
            word is encoded like any other word).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the BPE merge rules to one pre-tokenized token; results are memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Invert the byte-encoding: join tokens, map chars back to bytes, decode utf-8."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # BUG FIX: the sort key previously read `kv[1]` while the lambda
            # parameter had a different name, raising NameError at save time.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Single sequence: `<s> X </s>`; pair: `<s> A </s></s> B </s>`."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """BART does not use token type ids; return a zero vector of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)


# Backward-compatibility aliases for the previous (generated) names.
UpperCamelCase__ = BartTokenizer
_lowerCamelCase = get_pairs
104
1
"""Tests for the ImageGPT image processor (clusters, resize, normalize, serialization)."""
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    """Builds the kwargs dict used to instantiate the image processor under test."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        # BUG FIX: these were previously assigned to a throwaway local instead
        # of instance attributes, so prepare_image_processor_dict crashed.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                # clusters is an ndarray; compare element-wise.
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    """Load two fixture images used by the slow integration test."""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    imagea = Image.open(dataset[4]["file"])
    imageb = Image.open(dataset[5]["file"])

    images = [imagea, imageb]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
567
"""Processor pairing an auto image processor with an auto tokenizer."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    """
    Wraps an image processor and a tokenizer into a single processor object.

    Args:
        image_processor: the image processor (required).
        tokenizer: the tokenizer (required).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Tokenize `text` and/or preprocess `images`.

        Returns a BatchEncoding holding `input_ids`/`attention_mask` (if text was
        given) and `pixel_values` (if images were given).
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # BUG FIX: the pixel values were computed but never attached to the
            # returned encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
567
1
"""Spark-backed dataset builder: materializes a `pyspark.sql.DataFrame` into dataset shards."""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark builder."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a zero-arg generator function yielding (key, example) pairs partition by partition."""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Key encodes partition and row so examples are globally unique.
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Iterates a Spark DataFrame one partition at a time; shards map to partitions."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Shuffle the order in which partitions are visited."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Keep only the partitions assigned to this worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """DatasetBuilder that writes a Spark DataFrame to Arrow/Parquet shards on the workers."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On multi-node clusters, verify the cache_dir is shared (e.g. NFS) between driver and workers."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition so the estimated Arrow size of each partition stays under max_shard_size."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        """Run shard writing on the workers; yields (task_id, (num_examples, num_bytes, num_shards, shard_lengths))."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                # NOTE(review): `return` inside a generator discards this batch value
                # (it becomes StopIteration.value) — preserved from the original; verify upstream intent.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                # Move the locally written shards to their final (possibly remote-mounted) location.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Write all shards, record split stats, then rename shards to the SSSSS-of-NNNNN pattern."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"""Renaming {total_shards} shards.""")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
427
"""Project Euler 25: index of the first Fibonacci term to contain `n` digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1, fibonacci(3) == 2, ...).

    Mirrors the original contract: returns 0 for n == 1 or non-int input.
    (BUG FIX: the type check previously read `isinstance(n, n)`, which raises
    TypeError for any non-1 input instead of returning 0.)
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the smallest index i (starting the search at 3) with len(str(F(i))) >= n.

    Iterates the Fibonacci pair directly instead of recomputing the whole
    sequence from scratch for every index (the previous version was quadratic
    in the answer).
    """
    prev, curr = 1, 1  # F(1), F(2)
    index = 2
    digits = 0
    while digits < n:
        index += 1
        prev, curr = curr, prev + curr
        digits = len(str(curr))
    return index


def solution(n: int = 10_00) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
427
1
"""simple docstring""" def _lowerCamelCase ( __a = 10 ): if not isinstance(__a, __a ) or n < 0: raise ValueError('''Invalid input''' ) SCREAMING_SNAKE_CASE_ = 10**n SCREAMING_SNAKE_CASE_ = 28_433 * (pow(2, 7_830_457, __a )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
715
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class snake_case ( __lowercase , unittest.TestCase ): UpperCAmelCase__ = TransfoXLTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = False def _lowercase (self ): """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE_ = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _lowercase (self , **SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running''' SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running''' return input_text, output_text def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? 
''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' SCREAMING_SNAKE_CASE_ = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
628
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
445
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ): a :Optional[Any] = parent a :str = batch_size a :Tuple = seq_length a :List[Any] = is_training a :Optional[int] = use_attention_mask a :List[str] = use_token_type_ids a :str = use_labels a :Optional[Any] = vocab_size a :Optional[int] = hidden_size a :Tuple = num_hidden_layers a :Union[str, Any] = num_attention_heads a :int = intermediate_size a :int = hidden_act a :int = hidden_dropout_prob a :Union[str, Any] = attention_probs_dropout_prob a :str = max_position_embeddings a :Dict = type_vocab_size a :str = type_sequence_label_size a :List[str] = initializer_range a :Optional[Any] = num_choices def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a :Any = None if self.use_attention_mask: a :Any = random_attention_mask([self.batch_size, self.seq_length] ) a :Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , 
n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_lowerCamelCase , ) return config, input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.prepare_config_and_inputs() a , a , a :str = config_and_inputs a :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = FlaxDistilBertModelTester(self ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_class_name in self.all_model_classes: a :int = model_class_name.from_pretrained('''distilbert-base-uncased''' ) a :List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) a :Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) a :List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) a :List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] a :Union[str, Any] = (1, 11, 768) self.assertEqual(output.shape , _lowerCamelCase ) a :int = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
445
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[Any] = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin SCREAMING_SNAKE_CASE__ : Dict = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ): def _snake_case ( self ) -> str: """simple docstring""" a__ : Optional[int] = load_tool("text-question-answering" ) self.tool.setup() a__ : Dict = load_tool("text-question-answering" , remote=snake_case ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> Any: """simple docstring""" a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> int: """simple docstring""" a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" 
) self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
629
1
'''simple docstring''' import math def _UpperCAmelCase ( _lowerCamelCase : int ) -> bool: assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False _lowerCAmelCase : int = range(3 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int=1 , **_lowerCamelCase : Dict ) -> Tuple: _lowerCAmelCase : Optional[int] = factor * value _lowerCAmelCase : Tuple = value while not is_prime(_lowerCamelCase ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **_lowerCamelCase ) return value
384
'''simple docstring''' class a_ : def __init__( self , snake_case_ ): _lowerCAmelCase : Tuple = len(snake_case_ ) _lowerCAmelCase : Optional[int] = [0] * len_array if len_array > 0: _lowerCAmelCase : Dict = array[0] for i in range(1 , snake_case_ ): _lowerCAmelCase : Tuple = self.prefix_sum[i - 1] + array[i] def __UpperCamelCase ( self , snake_case_ , snake_case_ ): if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def __UpperCamelCase ( self , snake_case_ ): _lowerCAmelCase : Tuple = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(snake_case_ ) return False if __name__ == "__main__": import doctest doctest.testmod()
384
1
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = '''ResNetConfig''' # Base docstring _lowerCAmelCase = '''microsoft/resnet-50''' _lowerCAmelCase = [1, 2048, 7, 7] # Image classification docstring _lowerCAmelCase = '''microsoft/resnet-50''' _lowerCAmelCase = '''tiger cat''' _lowerCAmelCase = [ '''microsoft/resnet-50''', # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 3 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> Dict: super().__init__() lowerCAmelCase__ : int = nn.Convad( __UpperCAmelCase ,__UpperCAmelCase ,kernel_size=__UpperCAmelCase ,stride=__UpperCAmelCase ,padding=kernel_size // 2 ,bias=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = nn.BatchNormad(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tensor: lowerCAmelCase__ : List[Any] = self.convolution(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = self.normalization(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.activation(__UpperCAmelCase ) return hidden_state class lowerCAmelCase_( nn.Module ): '''simple 
docstring''' def __init__( self ,__UpperCAmelCase ) -> int: super().__init__() lowerCAmelCase__ : Optional[int] = ResNetConvLayer( config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act ) lowerCAmelCase__ : Optional[Any] = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 ) lowerCAmelCase__ : Union[str, Any] = config.num_channels def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tensor: lowerCAmelCase__ : List[Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) lowerCAmelCase__ : Any = self.embedder(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = self.pooler(__UpperCAmelCase ) return embedding class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ) -> Tuple: super().__init__() lowerCAmelCase__ : List[Any] = nn.Convad(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,stride=__UpperCAmelCase ,bias=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = nn.BatchNormad(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tensor: lowerCAmelCase__ : int = self.convolution(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = self.normalization(__UpperCAmelCase ) return hidden_state class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> Optional[int]: super().__init__() lowerCAmelCase__ : List[Any] = in_channels != out_channels or stride != 1 lowerCAmelCase__ : Optional[Any] = ( ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) lowerCAmelCase__ : Optional[Any] = nn.Sequential( ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase 
,activation=__UpperCAmelCase ) ,) lowerCAmelCase__ : Union[str, Any] = ACTaFN[activation] def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Union[str, Any] = hidden_state lowerCAmelCase__ : Optional[Any] = self.layer(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = self.shortcut(__UpperCAmelCase ) hidden_state += residual lowerCAmelCase__ : Dict = self.activation(__UpperCAmelCase ) return hidden_state class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ,__UpperCAmelCase = 4 ) -> str: super().__init__() lowerCAmelCase__ : str = in_channels != out_channels or stride != 1 lowerCAmelCase__ : Optional[int] = out_channels // reduction lowerCAmelCase__ : List[str] = ( ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) lowerCAmelCase__ : Optional[int] = nn.Sequential( ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,activation=__UpperCAmelCase ) ,) lowerCAmelCase__ : str = ACTaFN[activation] def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Dict = hidden_state lowerCAmelCase__ : Dict = self.layer(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = self.shortcut(__UpperCAmelCase ) hidden_state += residual lowerCAmelCase__ : Optional[Any] = self.activation(__UpperCAmelCase ) return hidden_state class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ,__UpperCAmelCase = 2 ,) -> Optional[Any]: super().__init__() lowerCAmelCase__ : Tuple = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer lowerCAmelCase__ : Union[str, Any] = 
nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ,activation=config.hidden_act ) ,*[layer(__UpperCAmelCase ,__UpperCAmelCase ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tensor: lowerCAmelCase__ : List[Any] = input for layer in self.layers: lowerCAmelCase__ : int = layer(__UpperCAmelCase ) return hidden_state class lowerCAmelCase_( nn.Module ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ) -> List[str]: super().__init__() lowerCAmelCase__ : str = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( __UpperCAmelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) ) lowerCAmelCase__ : List[Any] = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(__UpperCAmelCase ,config.depths[1:] ): self.stages.append(ResNetStage(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,depth=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ,__UpperCAmelCase = True ) -> BaseModelOutputWithNoAttention: lowerCAmelCase__ : Dict = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase__ : Dict = hidden_states + (hidden_state,) lowerCAmelCase__ : Dict = stage_module(__UpperCAmelCase ) if output_hidden_states: lowerCAmelCase__ : Optional[int] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=__UpperCAmelCase ,hidden_states=__UpperCAmelCase ,) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Tuple = ResNetConfig __lowercase : 
Union[str, Any] = '''resnet''' __lowercase : int = '''pixel_values''' __lowercase : Optional[int] = True def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: if isinstance(__UpperCAmelCase ,nn.Convad ): nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" ) elif isinstance(__UpperCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight ,1 ) nn.init.constant_(module.bias ,0 ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> int: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : str = value _lowerCAmelCase = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCAmelCase = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE_ , ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ) -> Union[str, Any]: super().__init__(__UpperCAmelCase ) lowerCAmelCase__ : Any = config lowerCAmelCase__ : Dict = ResNetEmbeddings(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = ResNetEncoder(__UpperCAmelCase ) lowerCAmelCase__ : str = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BaseModelOutputWithPoolingAndNoAttention: lowerCAmelCase__ : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ : Any = self.embedder(__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.encoder( __UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) lowerCAmelCase__ : List[str] = encoder_outputs[0] lowerCAmelCase__ : List[str] = self.pooler(__UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase ,pooler_output=__UpperCAmelCase ,hidden_states=encoder_outputs.hidden_states ,) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , SCREAMING_SNAKE_CASE_ , ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ) -> Optional[Any]: super().__init__(__UpperCAmelCase ) lowerCAmelCase__ : Any = config.num_labels lowerCAmelCase__ : Dict = ResNetModel(__UpperCAmelCase ) # classification head lowerCAmelCase__ : Any = nn.Sequential( nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> ImageClassifierOutputWithNoAttention: lowerCAmelCase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ : List[str] = self.resnet(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase__ : Tuple = self.classifier(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase__ : str = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase__ : int = """single_label_classification""" else: lowerCAmelCase__ : Union[str, Any] = """multi_label_classification""" if self.config.problem_type == "regression": lowerCAmelCase__ : Any = MSELoss() if self.num_labels == 1: lowerCAmelCase__ : Any = loss_fct(logits.squeeze() ,labels.squeeze() ) else: lowerCAmelCase__ : Dict = loss_fct(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config.problem_type 
== "single_label_classification": lowerCAmelCase__ : Union[str, Any] = CrossEntropyLoss() lowerCAmelCase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase__ : int = BCEWithLogitsLoss() lowerCAmelCase__ : int = loss_fct(__UpperCAmelCase ,__UpperCAmelCase ) if not return_dict: lowerCAmelCase__ : Tuple = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase ,logits=__UpperCAmelCase ,hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , SCREAMING_SNAKE_CASE_ , ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ) -> List[str]: super().__init__(__UpperCAmelCase ) super()._init_backbone(__UpperCAmelCase ) lowerCAmelCase__ : Any = [config.embedding_size] + config.hidden_sizes lowerCAmelCase__ : Union[str, Any] = ResNetEmbeddings(__UpperCAmelCase ) lowerCAmelCase__ : int = ResNetEncoder(__UpperCAmelCase ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @replace_return_docstrings(output_type=__UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BackboneOutput: lowerCAmelCase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ : Optional[Any] = self.embedder(__UpperCAmelCase ) lowerCAmelCase__ : int = self.encoder(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = outputs.hidden_states lowerCAmelCase__ : Tuple 
= () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: lowerCAmelCase__ : Tuple = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=__UpperCAmelCase ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=__UpperCAmelCase ,)
160
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _lowerCAmelCase = datasets.logging.get_logger(__name__) _lowerCAmelCase = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' _lowerCAmelCase = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' _lowerCAmelCase = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. 
keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] 
\'conll_score\': 100.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="dummy_doc" ): """simple docstring""" lowerCAmelCase__ : str = {doc: key_lines} lowerCAmelCase__ : Tuple = {doc: sys_lines} lowerCAmelCase__ : int = {} lowerCAmelCase__ : Dict = 0 lowerCAmelCase__ : int = 0 lowerCAmelCase__ : int = 0 lowerCAmelCase__ : Union[str, Any] = 0 lowerCAmelCase__ : str = 0 lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ , lowerCAmelCase__ : Any = reader.get_doc_mentions(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCAmelCase__ : Optional[int] = reader.set_annotated_parse_trees(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reader.get_doc_mentions(UpperCamelCase , sys_doc_lines[doc] , UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCAmelCase__ : List[str] = reader.set_annotated_parse_trees(UpperCamelCase , key_doc_lines[doc] , UpperCamelCase , UpperCamelCase ) if remove_nested: lowerCAmelCase__ , lowerCAmelCase__ : str = reader.remove_nested_coref_mentions(UpperCamelCase , UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCAmelCase__ , lowerCAmelCase__ : Any = reader.remove_nested_coref_mentions(UpperCamelCase , UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCAmelCase__ : Optional[int] = reader.get_mention_assignments(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = reader.get_mention_assignments(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring 
mentions in the key """ f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( """Number of resulting singleton clusters in the key """ f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ """files, respectively""" ) return doc_coref_infos def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : str = get_coref_infos(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = {} lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : str = 0 for name, metric in metrics: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = evaluator.evaluate_documents(UpperCamelCase , UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , ) if conll_subparts_num == 3: lowerCAmelCase__ : Any = (conll / 3) * 100 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({"""conll_score""": conll} ) return output_scores def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: lowerCAmelCase__ : List[Any] = line.split()[5] if not parse_col == "-": lowerCAmelCase__ : Union[str, Any] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , 
_KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) ,codebase_urls=["""https://github.com/ns-moosavi/coval"""] ,reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ) -> str: lowerCAmelCase__ : List[str] = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: lowerCAmelCase__ : Optional[int] = util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" lowerCAmelCase__ : Dict = evaluate( key_lines=__UpperCAmelCase ,sys_lines=__UpperCAmelCase ,metrics=__UpperCAmelCase ,NP_only=__UpperCAmelCase ,remove_nested=__UpperCAmelCase ,keep_singletons=__UpperCAmelCase ,min_span=__UpperCAmelCase ,) return score
160
1
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    """CPU-friendly smoke tests for the Karras VE pipeline with a tiny UNet."""

    @property
    def dummy_uncond_unet(self):
        """Small, deterministically-initialized unconditional UNet for fast tests."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """The dict and tuple return paths produce the same image, matching the expected slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        # Re-seed so the tuple-return call sees identical noise.
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained NCSN++ CelebA-HQ checkpoint."""

    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
79
"""Lazy-loading package init for the M2M-100 model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

# The modeling module is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names match _import_structure above.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
349
0
"""Auto-generated dummy objects that raise a helpful error when torch is missing.

NOTE(review): in this chunk every class name was mangled to ``lowercase_`` and every
function name to ``_UpperCamelCase`` (each definition shadows the previous binding);
the original distinct identifiers are unrecoverable here, so the mangled names and
the definition count are preserved as-is. Repaired defects: the duplicated
``*a, **a`` parameters (a SyntaxError), the undefined ``a__`` metaclass (the
imported ``DummyObject``), un-imported typing annotations, and the standard dummy
template members (``_backends``, ``from_config``, ``from_pretrained``).
"""
from ..utils import DummyObject, requires_backends


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


def _UpperCamelCase(*args, **kwargs):
    requires_backends(_UpperCamelCase, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class lowercase_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
701
"""Deprecated feature-extractor alias for BEiT; use `BeitImageProcessor` instead."""
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    """Backward-compatibility shim that forwards everything to `BeitImageProcessor`."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning on construction; behavior is otherwise identical to
        # the parent image processor.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
223
0
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Map a requested pixel size to a latent-compatible size, rounding up to whole tiles.

    Returns a (height, width) pair that is a multiple of `scale_factor` and covers the
    requested size once upscaled by `scale_factor` per dimension.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for Kandinsky 2.2: generates images from CLIP image embeddings.

    Args:
        unet: Conditional U-Net that denoises the image latents.
        scheduler: DDPM scheduler used together with `unet`.
        movq: MoVQ module that decodes latents into images.
    """

    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # The latent grid is downsampled once per MoVQ down block.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents, scaled by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving each one to GPU only while it executes."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with hooks; lower memory, faster than sequential offload."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the pipeline's models execute on (accounts for accelerate offload hooks)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop and decode latents to images.

        Returns an `ImagePipelineOutput` (or a plain tuple when `return_dict=False`).
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The UNet also predicts a variance; split it off before applying guidance.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
84
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of `FlaxControlNetModel.__call__`.

    Attributes:
        down_block_res_samples: tuple of per-resolution residual tensors produced by
            the zero-initialized controlnet down blocks.
        mid_block_res_sample: residual tensor produced by the controlnet mid block.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    """Small conv network that embeds the conditioning image (e.g. edges/pose maps)
    into the latent resolution expected by the UNet's first block.

    Attributes:
        conditioning_embedding_channels: number of output channels of the embedding.
        block_out_channels: channels of the intermediate conv stages; each stage
            halves the spatial resolution via a stride-2 conv.
        dtype: parameter/computation dtype.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            # Same-resolution conv followed by a stride-2 downsampling conv.
            conv_a = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_a)
            conv_a = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_a)
        self.blocks = blocks

        # Zero-initialized so the controlnet starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: a UNet encoder copy whose per-block residuals are fed back
    into a frozen UNet to condition generation on an extra image signal.
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        """Initialize parameters by tracing the module with dummy inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # Conditioning image lives in pixel space: 8x the latent resolution, 3 channels.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            # One zero conv per resnet layer, plus one per downsampler.
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        """Run the controlnet and return the scaled residuals.

        Args:
            sample: noisy latents, NCHW layout.
            timesteps: scalar or 1-D array of diffusion timesteps.
            encoder_hidden_states: text-encoder hidden states for cross attention.
            controlnet_cond: conditioning image in pixel space, NCHW layout.
            conditioning_scale: multiplier applied to all residuals.
            return_dict: return a `FlaxControlNetOutput` instead of a tuple.
            train: enables dropout in sub-blocks.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (flax convs expect NHWC)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks (zero convs on each residual)
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
84
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : List[Any] = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = [ """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""", """SEWForCTC""", """SEWForSequenceClassification""", """SEWModel""", """SEWPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
486
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: used for process synchronization and to pick padding strategy.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC with gradient accumulation.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI arguments (mixed_precision, cpu, gradient_accumulation_steps).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI flags and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
486
1
'''simple docstring''' UpperCamelCase_ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
28
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public symbols, consumed lazily by `_LazyModule` below.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional: the slow tokenizer is simply not exported.
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
348
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
705
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class a_ ( snake_case ): UpperCAmelCase : Optional[int] = """markuplm""" def __init__( self : Any , a_ : Optional[int]=3_0_5_2_2 , a_ : Optional[int]=7_6_8 , a_ : List[Any]=1_2 , a_ : int=1_2 , a_ : int=3_0_7_2 , a_ : int="gelu" , a_ : str=0.1 , a_ : Tuple=0.1 , a_ : List[str]=5_1_2 , a_ : Any=2 , a_ : Union[str, Any]=0.0_2 , a_ : Any=1E-1_2 , a_ : Optional[int]=0 , a_ : str=0 , a_ : Optional[Any]=2 , a_ : Optional[Any]=2_5_6 , a_ : Tuple=1_0_2_4 , a_ : List[str]=2_1_6 , a_ : List[str]=1_0_0_1 , a_ : Optional[int]=3_2 , a_ : Optional[int]=5_0 , a_ : Optional[int]="absolute" , a_ : Union[str, Any]=True , a_ : List[Any]=None , **a_ : Union[str, Any] , ) -> Optional[int]: super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , ) snake_case: Tuple =vocab_size snake_case: int =hidden_size snake_case: Optional[int] =num_hidden_layers snake_case: List[str] =num_attention_heads snake_case: Dict =hidden_act snake_case: List[Any] =intermediate_size snake_case: List[str] =hidden_dropout_prob snake_case: Dict =attention_probs_dropout_prob snake_case: List[Any] =max_position_embeddings snake_case: Optional[int] =type_vocab_size snake_case: Optional[int] =initializer_range snake_case: List[str] =layer_norm_eps snake_case: Optional[int] =position_embedding_type snake_case: int =use_cache snake_case: Tuple =classifier_dropout # additional properties snake_case: Dict =max_depth snake_case: Optional[Any] =max_xpath_tag_unit_embeddings snake_case: List[str] =max_xpath_subs_unit_embeddings snake_case: Optional[int] =tag_pad_id snake_case: List[str] =subs_pad_id snake_case: str =xpath_unit_hidden_size
347
0
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a : def __init__( self : str , snake_case__ : Dict , snake_case__ : Optional[int]=13 , snake_case__ : Any=30 , snake_case__ : int=2 , snake_case__ : List[Any]=3 , snake_case__ : List[str]=True , snake_case__ : Dict=True , snake_case__ : Dict=32 , snake_case__ : Union[str, Any]=5 , snake_case__ : Tuple=4 , snake_case__ : Tuple=37 , snake_case__ : Tuple="gelu" , snake_case__ : Any=0.1 , snake_case__ : str=0.1 , snake_case__ : Dict=10 , snake_case__ : List[str]=0.0_2 , snake_case__ : int=3 , snake_case__ : Tuple=None , snake_case__ : Any=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = 
hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() __lowerCAmelCase = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() 
__lowerCAmelCase = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(snake_case__ ) model.to(snake_case__ ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : str , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() __lowerCAmelCase = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowercase_ : str = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ : int = ( { 'feature-extraction': DeiTModel, 
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ : List[str] = False lowercase_ : Dict = False lowercase_ : str = False def UpperCAmelCase__ ( self : str ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" pass def UpperCAmelCase__ ( self : int ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(snake_case__ ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def UpperCAmelCase__ ( self : Tuple , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(snake_case__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(snake_case__ ) model.to(snake_case__ ) model.train() __lowerCAmelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = model(**snake_case__ ).loss loss.backward() def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(snake_case__ ) model.gradient_checkpointing_enable() 
model.to(snake_case__ ) model.train() __lowerCAmelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) __lowerCAmelCase = model(**snake_case__ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(snake_case__ ), *get_values(snake_case__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(snake_case__ ) model.to(snake_case__ ) model.train() __lowerCAmelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=snake_case__ ) as warning_list: __lowerCAmelCase = model(**snake_case__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : str ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( snake_case__ ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**snake_case__ ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) __lowerCAmelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( 
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(snake_case__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(snake_case__ )
611
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class a(unittest.TestCase, ToolTesterMixin):
    """Integration tests for the ``text-to-speech`` agent tool.

    Fixes over the mangled original:
    - ``setUp`` now stores the tool on ``self.tool`` (the original assigned it
      to a throwaway local and then called ``self.tool.setup()``, which would
      raise ``AttributeError``).
    - The base class is ``ToolTesterMixin`` (the original inherited from an
      undefined name, which would raise ``NameError`` at class creation).
    - The two test methods have distinct ``test_``-prefixed names so unittest
      discovers and runs both (the original gave both the same non-test name,
      so neither ran and the second silently shadowed the first).
    """

    def setUp(self):
        # The tool must live on the instance: both test methods and the
        # ToolTesterMixin helpers access it through ``self.tool``.
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Seed the generator so the synthesized waveform is deterministic.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor(
                    [-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]
                ),
            )
        )

    def test_exact_match_again(self):
        # Same input, same seed: the tool must be deterministic across calls.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor(
                    [-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]
                ),
            )
        )
611
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class UpperCAmelCase__(PreTrainedTokenizerFast):
    """Fast (Rust-backed) BART tokenizer, derived from the GPT-2 byte-level BPE tokenizer.

    Restored from the mangled original, which could not even be imported:
    the ``__init__`` signature repeated one parameter name (SyntaxError), the
    module-level constants were all assigned to a single name, the base class
    and the ``@mask_token.setter`` target were undefined names, and the class
    attributes / methods all shared one name so later definitions silently
    overwrote earlier ones.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Re-create the byte-level pre-tokenizer if the serialized state
        # disagrees with the requested ``add_prefix_space``.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object
            # `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token, logging an error (rather than raising) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # Make the mask token behave like a normal word, i.e. include the
        # space before it (lstrip=True). NOTE(review): lstrip/rstrip flags were
        # mangled in the original; True/False matches the documented BART
        # mask-token convention — confirm against the slow tokenizer.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids: return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
682
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class ViTHybridModelTester:
    """Builds small ViT-Hybrid configs/inputs and runs shape checks.

    Restored from a mangled original in which all parameters shared one name
    (SyntaxError), instance-attribute assignments were dead locals, and every
    method shared one name. The class name matches the ``ViTHybridModelTester``
    reference in ``ViTHybridModelTest.setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT-Hybrid (method names restored so unittest discovers them)."""

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            backbone_params = []
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits

        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        # Fixed: the original read the non-existent ``config.idalabel`` and used
        # assertTrue (which passes for any non-empty string).
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
682
1
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate a = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) a = [] a = [] a = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} a = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''', '''emoji''': True, }, } ] a = 0 for log in Path().glob('''*.log'''): a = 0 with open(log, '''r''') as f: for line in f: a = json.loads(line) if line.get('''nodeid''', '''''') != "": a = line['''nodeid'''] if line.get('''duration''', None) is not None: a = F'''{line["duration"]:.4f}''' if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) a = [] log.unlink() a = '''''' a = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" a = [] a = {} for test in failed_tests: a = test[0].split('''::''') a = data[0].split('''/''')[-1] if data[0] not in filesafailed: a = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) a = [test[0] for test in failed_table] a = list(set(files)) # Count number of instances in failed_tests a = [] for file in individual_files: table.append([file, len(filesafailed[file])]) a = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += 
F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: a = '''Too many failed tests, please see the full report in the Action results.''' a = len(err) + 10 a = message[: 3_000 - offset] + F'''\n...\n```\n{err}''' print(F'''### {message}''') else: a = '''No failed tests! 🤗''' print(F'''## {message}''') payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient a = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 🤗": a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } payload.append(action_button) a = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''', } ], } payload.append(date_report) a = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) a = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name a = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: a = row[0] else: a = '''''' a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''', }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
7
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def snake_case__ ( _snake_case : List[Any]=32 , _snake_case : Tuple=10 , _snake_case : str=1_00 , _snake_case : Optional[int]=10_26 , _snake_case : Any=True , _snake_case : str="data/tokenized_stories_train_wikitext103.jbl" , _snake_case : Any="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set UpperCamelCase__ , UpperCamelCase__ = generate_datasets( _snake_case , _snake_case , number=_snake_case , min_len=10_26 , trim=_snake_case ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? 
UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model UpperCamelCase__ = load_gpta("gpt2" ).to(_snake_case ) print("computing perplexity on objective set" ) UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ).item() print("perplexity on objective set:" , _snake_case ) # collect igf pairs and save to file demo.jbl collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def snake_case__ ( _snake_case : Any , _snake_case : str=15 , _snake_case : str=1_28 , _snake_case : int=1_00 , _snake_case : Tuple="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model UpperCamelCase__ = SecondaryLearner(_snake_case ) # Train secondary learner UpperCamelCase__ = train_secondary_learner( _snake_case , _snake_case , max_epochs=_snake_case , batch_size=_snake_case , eval_freq=1_00 , igf_model_path=_snake_case , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def snake_case__ ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : List[str]=32 , _snake_case : Tuple=10_00 , _snake_case : List[Any]=16 , _snake_case : str=1.0 , _snake_case : List[str]=recopy_gpta , _snake_case : Optional[int]=None , _snake_case : Optional[int]=10 , _snake_case : Optional[int]="gpt2_finetuned.pt" , ): """simple docstring""" UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) UpperCamelCase__ = RandomSampler(_snake_case ) UpperCamelCase__ = DataLoader(_snake_case , sampler=_snake_case ) UpperCamelCase__ = max_steps // (len(_snake_case )) + 1 UpperCamelCase__ = 0 
UpperCamelCase__ = torch.zeros((1, context_len) , dtype=torch.long , device=_snake_case ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = recopy_model(_snake_case , _snake_case , _snake_case ) model.train() if secondary_learner is not None: secondary_learner.to(_snake_case ) secondary_learner.eval() UpperCamelCase__ = [] UpperCamelCase__ = 0 UpperCamelCase__ = [] UpperCamelCase__ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) for epoch in range(int(_snake_case ) ): for step, example in enumerate(_snake_case ): torch.cuda.empty_cache() UpperCamelCase__ = random.randint(0 , example.size(2 ) - context_len - 1 ) UpperCamelCase__ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() UpperCamelCase__ = model(_snake_case , labels=_snake_case ) UpperCamelCase__ = True if secondary_learner is not None: UpperCamelCase__ = secondary_learner.forward( torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_snake_case ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase__ = -1 if predicted_q < threshold: UpperCamelCase__ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) UpperCamelCase__ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase__ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _snake_case ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=_snake_case , type=_snake_case , required=_snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=_snake_case , default=_snake_case , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=_snake_case , default=_snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the final fine-tuned model is stored." 
, ) parser.add_argument( "--tokenizer_name" , default=_snake_case , type=_snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=_snake_case , default=_snake_case , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=_snake_case , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=1_00 , type=_snake_case , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=1_00 , type=_snake_case , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=10_00 , type=_snake_case , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=1_28 , type=_snake_case , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=_snake_case , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=_snake_case , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=1_00 , type=_snake_case , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=10_26 , type=_snake_case , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=_snake_case , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=_snake_case , type=_snake_case , help="truncate the example if it exceeds context length" ) parser.add_argument( 
"--threshold" , default=1.0 , type=_snake_case , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_snake_case , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=_snake_case , type=_snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_snake_case , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner UpperCamelCase__ = joblib.load("data/IGF_values.jbl" ) # Train secondary learner UpperCamelCase__ = training_secondary_learner( _snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase__ , UpperCamelCase__ = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=_snake_case ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _snake_case , _snake_case , _snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_snake_case , secondary_learner=_snake_case , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
516
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) __UpperCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' ) __UpperCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids __UpperCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids __UpperCamelCase = model(input_ids.to(__UpperCAmelCase ) , labels=labels.to(__UpperCAmelCase ) ).loss __UpperCamelCase = -(labels.shape[-1] * loss.item()) __UpperCamelCase = -8_4.9_1_2_7 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
293
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests UpperCamelCase : Union[str, Any] = "https://api.github.com" # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user UpperCamelCase : Union[str, Any] = BASE_URL + "/user" # https://github.com/settings/tokens UpperCamelCase : Optional[int] = os.environ.get("USER_TOKEN", "") def A ( snake_case :str ) -> dict[Any, Any]: __UpperCamelCase = { 'Authorization': f'token {auth_token}', 'Accept': 'application/vnd.github.v3+json', } return requests.get(snake_case , headers=snake_case ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(f'''{key}: {value}''') else: raise ValueError("'USER_TOKEN' field cannot be empty.")
293
1
import datasets __UpperCAmelCase = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' __UpperCAmelCase = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' __UpperCAmelCase = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def UpperCamelCase ( snake_case__ : str , snake_case__ : List[Any] ) -> Any: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def snake_case_ ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), } ), codebase_urls=[], reference_urls=[], format='numpy', ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]: 
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )}
40
'''simple docstring''' import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) lowerCAmelCase_ : Tuple = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' lowerCAmelCase_ : Union[str, Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' lowerCAmelCase_ : Union[str, Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. 
Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . 
*)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase ( lowercase : Tuple , lowercase : List[Any] , lowercase : Optional[int]=False , lowercase : Dict=False , lowercase : Optional[int]=True , lowercase : Union[str, Any]=False , lowercase : int="dummy_doc" ) -> Union[str, Any]: _a = {doc: key_lines} _a = {doc: sys_lines} _a = {} _a = 0 _a = 0 _a = 0 _a = 0 _a = 0 _a = 0 _a , _a = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase ) key_singletons_num += singletons_num if NP_only or min_span: _a = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) _a , _a = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase ) sys_singletons_num += singletons_num if NP_only or min_span: _a = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase ) if remove_nested: _a , _a = reader.remove_nested_coref_mentions(lowercase , lowercase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _a , _a = reader.remove_nested_coref_mentions(lowercase , lowercase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _a = reader.get_mention_assignments(lowercase , lowercase ) _a = reader.get_mention_assignments(lowercase , lowercase ) _a = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( "Number of resulting singleton clusters in the key " F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( 
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' "files, respectively" ) return doc_coref_infos def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Any , lowercase : List[str] , lowercase : Dict ) -> str: _a = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) _a = {} _a = 0 _a = 0 for name, metric in metrics: _a , _a , _a = evaluator.evaluate_documents(lowercase , lowercase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(10 ) , F'Recall: {recall * 100:.2f}' , F' Precision: {precision * 100:.2f}' , F' F1: {fa * 100:.2f}' , ) if conll_subparts_num == 3: _a = (conll / 3) * 100 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({"conll_score": conll} ) return output_scores def _lowerCamelCase ( lowercase : Any ) -> str: _a = False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: _a = line.split()[5] if not parse_col == "-": _a = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE (datasets.Metric ): """simple docstring""" def UpperCamelCase__ ( self : str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Sequence(datasets.Value("string" ) ), } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[ "https://github.com/ns-moosavi/coval", "https://www.aclweb.org/anthology/P16-1060", "http://www.conll.cemantix.org/2012/data.html", ] , ) def UpperCamelCase__ ( self : int , __a : Any , __a : int , 
__a : Optional[Any]=True , __a : Optional[Any]=False , __a : str=False , __a : List[str]=False ): _a = [ ("mentions", evaluator.mentions), ("muc", evaluator.muc), ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea), ] if min_span: _a = util.check_gold_parse_annotation(__a ) if not has_gold_parse: raise NotImplementedError("References should have gold parse annotation to use 'min_span'." ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _a = evaluate( key_lines=__a , sys_lines=__a , metrics=__a , NP_only=__a , remove_nested=__a , keep_singletons=__a , min_span=__a , ) return score
692
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case () -> Dict: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT): with pytest.raises(_snake_case): requests.request('GET' , 'https://huggingface.co') with pytest.raises(requests.exceptions.ConnectTimeout): requests.request('GET' , 'https://huggingface.co' , timeout=1.0) @pytest.mark.integration def _snake_case () -> int: with offline(OfflineSimulationMode.CONNECTION_FAILS): with pytest.raises(requests.exceptions.ConnectionError): requests.request('GET' , 'https://huggingface.co') def _snake_case () -> int: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1): with pytest.raises(_snake_case): http_head('https://huggingface.co')
712
from math import isqrt, loga def _snake_case (_snake_case : int) -> list[int]: _lowercase =[True] * max_number for i in range(2 , isqrt(max_number - 1) + 1): if is_prime[i]: for j in range(i**2 , _snake_case , _snake_case): _lowercase =False return [i for i in range(2 , _snake_case) if is_prime[i]] def _snake_case (_snake_case : int = 80_0800 , _snake_case : int = 80_0800) -> int: _lowercase =degree * loga(_snake_case) _lowercase =int(_snake_case) _lowercase =calculate_prime_numbers(_snake_case) _lowercase =0 _lowercase =0 _lowercase =len(_snake_case) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left]) + prime_numbers[left] * loga(prime_numbers[right]) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
557
0
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters lowerCAmelCase : Optional[int] = logging.get_logger(__name__) def A_( A : Optional[int] , A : Any , A : Union[str, Any] , A : Optional[int]=None , A : List[str]=None): # Recurse if needed if "." in tensor_name: UpperCamelCase = tensor_name.split('.') for split in splits[:-1]: UpperCamelCase = getattr(A , A) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''') UpperCamelCase = new_module UpperCamelCase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''') UpperCamelCase = tensor_name in module._buffers UpperCamelCase = getattr(A , A) if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None: raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''') UpperCamelCase = False UpperCamelCase = False if is_buffer or not is_bitsandbytes_available(): UpperCamelCase = False UpperCamelCase = False else: UpperCamelCase = hasattr(bnb.nn , 'Params4bit') and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit) UpperCamelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams) if is_abit or is_abit: UpperCamelCase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCamelCase = old_value.to(A) elif isinstance(A , torch.Tensor): UpperCamelCase = value.to('cpu') if value.dtype == torch.inta: 
UpperCamelCase = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse( '0.37.2') if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.') else: UpperCamelCase = torch.tensor(A , device='cpu') # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , A) and fpaa_statistics is None: UpperCamelCase = new_value.T UpperCamelCase = old_value.__dict__ if is_abit: UpperCamelCase = bnb.nn.IntaParams(A , requires_grad=A , **A).to(A) elif is_abit: UpperCamelCase = bnb.nn.Paramsabit(A , requires_grad=A , **A).to(A) UpperCamelCase = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(A)) else: if value is None: UpperCamelCase = old_value.to(A) elif isinstance(A , torch.Tensor): UpperCamelCase = value.to(A) else: UpperCamelCase = torch.tensor(A , device=A) if is_buffer: UpperCamelCase = new_value else: UpperCamelCase = nn.Parameter(A , requires_grad=old_value.requires_grad) UpperCamelCase = new_value def A_( A : Union[str, Any] , A : str=None , A : List[str]=None , A : str=None , A : Dict=False): for name, module in model.named_children(): if current_key_name is None: UpperCamelCase = [] current_key_name.append(A) if (isinstance(A , nn.Linear) or isinstance(A , A)) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(A) for key in modules_to_not_convert): with init_empty_weights(): if isinstance(A , A): UpperCamelCase , UpperCamelCase = module.weight.shape else: UpperCamelCase = module.in_features UpperCamelCase = module.out_features if 
quantization_config.quantization_method() == "llm_int8": UpperCamelCase = bnb.nn.LinearabitLt( A , A , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCamelCase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCamelCase = bnb.nn.Linearabit( A , A , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCamelCase = True # Store the module class in case we need to transpose the weight later UpperCamelCase = type(A) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(A) if len(list(module.children())) > 0: UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear( A , A , A , A , has_been_replaced=A , ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def A_( A : Union[str, Any] , A : List[Any]=None , A : Optional[Any]=None , A : str=None): UpperCamelCase = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear( A , A , A , A) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' 
' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.') return model def A_( *A : Tuple , **A : Optional[int]): warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , A , ) return replace_with_bnb_linear(*A , **A) def A_( *A : List[str] , **A : Tuple): warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , A , ) return set_module_quantized_tensor_to_device(*A , **A) def A_( A : int): UpperCamelCase = deepcopy(A) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCamelCase = find_tied_parameters(A) # For compatibility with Accelerate < 0.18 if isinstance(A , A): UpperCamelCase = sum(list(tied_params.values()) , []) + list(tied_params.keys()) else: UpperCamelCase = sum(A , []) UpperCamelCase = len(A) > 0 # Check if it is a base model UpperCamelCase = not hasattr(A , model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCamelCase = list(model.named_children()) UpperCamelCase = [list_modules[-1][0]] # add last module together with tied weights UpperCamelCase = set(A) - set(A) UpperCamelCase = list(set(A)) + list(A) # remove ".weight" from the keys UpperCamelCase = ['.weight', '.bias'] UpperCamelCase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCamelCase = name.replace(A , '') filtered_module_names.append(A) return filtered_module_names
3
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : Dict = logging.get_logger(__name__) # General docstring lowerCAmelCase : str = 'RegNetConfig' # Base docstring lowerCAmelCase : str = 'facebook/regnet-y-040' lowerCAmelCase : Dict = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Dict = 'facebook/regnet-y-040' lowerCAmelCase : int = 'tabby, tabby cat' lowerCAmelCase : int = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str: '''simple docstring''' super().__init__(**A_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) UpperCamelCase = tf.keras.layers.ConvaD( filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , ) UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self , A_ )-> Any: '''simple docstring''' UpperCamelCase = self.convolution(self.padding(A_ ) ) UpperCamelCase = self.normalization(A_ ) UpperCamelCase = 
self.activation(A_ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , **A_ )-> Optional[Any]: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = config.num_channels UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def UpperCAmelCase_ ( self , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = shape_list(A_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) ) UpperCamelCase = self.embedder(A_ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = tf.keras.layers.ConvaD( filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' ) UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor: '''simple docstring''' return self.normalization(self.convolution(A_ ) , training=A_ ) class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ , **A_ )-> Optional[Any]: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' ) UpperCamelCase = [ tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def UpperCAmelCase_ ( self , 
A_ )-> Optional[int]: '''simple docstring''' UpperCamelCase = self.pooler(A_ ) for layer_module in self.attention: UpperCamelCase = layer_module(A_ ) UpperCamelCase = hidden_state * pooled return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = in_channels != out_channels or stride != 1 UpperCamelCase = max(1 , out_channels // config.groups_width ) UpperCamelCase = ( TFRegNetShortCut(A_ , stride=A_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. UpperCamelCase = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ), ] UpperCamelCase = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self , A_ )-> Tuple: '''simple docstring''' UpperCamelCase = hidden_state for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) UpperCamelCase = self.shortcut(A_ ) hidden_state += residual UpperCamelCase = self.activation(A_ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = in_channels != out_channels or stride != 1 UpperCamelCase = max(1 , out_channels // config.groups_width ) UpperCamelCase = ( TFRegNetShortCut(A_ , stride=A_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) UpperCamelCase = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ), 
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ), ] UpperCamelCase = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self , A_ )-> List[Any]: '''simple docstring''' UpperCamelCase = hidden_state for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) UpperCamelCase = self.shortcut(A_ ) hidden_state += residual UpperCamelCase = self.activation(A_ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ), *[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self , A_ )-> List[Any]: '''simple docstring''' for layer_module in self.layers: UpperCamelCase = layer_module(A_ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): def __init__( self , A_ , **A_ )-> str: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) ) def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention: '''simple docstring''' UpperCamelCase = () if output_hidden_states else None for 
stage_module in self.stages: if output_hidden_states: UpperCamelCase = hidden_states + (hidden_state,) UpperCamelCase = stage_module(A_ ) if output_hidden_states: UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ ) @keras_serializable class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer): lowerCAmelCase_ = RegNetConfig def __init__( self , A_ , **A_ )-> Union[str, Any]: '''simple docstring''' super().__init__(**A_ ) UpperCamelCase = config UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' ) UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' ) UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' ) @unpack_inputs def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.embedder(A_ , training=A_ ) UpperCamelCase = self.encoder( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) UpperCamelCase = encoder_outputs[0] UpperCamelCase = self.pooler(A_ ) # Change to NCHW output format have uniformity in the modules UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) ) UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else 
encoder_outputs.hidden_states , ) class SCREAMING_SNAKE_CASE__ ( snake_case_): lowerCAmelCase_ = RegNetConfig lowerCAmelCase_ = """regnet""" lowerCAmelCase_ = """pixel_values""" @property def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : str = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase : List[str] = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , ) class SCREAMING_SNAKE_CASE__ ( snake_case_): def __init__( self , A_ , *A_ , **A_ )-> List[Any]: '''simple docstring''' super().__init__(A_ , *A_ , **A_ ) UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: '''simple docstring''' UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.regnet( pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , snake_case_ , ) class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_): def __init__( self , A_ , *A_ , **A_ )-> str: '''simple docstring''' super().__init__(A_ , *A_ , **A_ ) UpperCamelCase = config.num_labels UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' ) # classification head UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: '''simple docstring''' UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase = self.regnet( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase = self.classifier[0](A_ ) UpperCamelCase = self.classifier[1](A_ ) UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ ) if not return_dict: UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
3
1
"""Demonstrate classic fuzzy-set operations (union, intersection, complement,
difference, algebraic sum/product, bounded sum/difference) on two triangular
membership functions, and plot every result.

Fix: the original bound every value to the same throwaway name while the rest
of the script referenced ``X``, ``young``, ``middle_aged`` etc., which were
therefore undefined; the intended names are restored from those references.
"""
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Universe of discourse: 75 evenly spaced points on [0, 75].
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Triangular membership functions (a, b, c vertices) for the two fuzzy sets.
    young_abc = [0, 25, 50]
    middle_aged_abc = [25, 50, 75]
    young = fuzz.membership.trimf(X, young_abc)
    middle_aged = fuzz.membership.trimf(X, middle_aged_abc)

    # Constant membership vectors used by the bounded operations below.
    one = np.ones(75)
    zero = np.zeros((75,))

    # 1. Union = max(uA(x), uB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(uA(x), uB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - uA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(uA(x), 1 - uB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = uA(x) + uB(x) - (uA(x) * uB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = uA(x) * uB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min(1, uA(x) + uB(x))
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max(0, uA(x) - uB(x))
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
96
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _A (UpperCamelCase : str ) ->None: '''simple docstring''' lowerCamelCase__ ,lowerCamelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCamelCase__ : Any = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCamelCase__ : str = sum(single_char_strings.values() ) # one length string lowerCamelCase__ : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCamelCase__ : List[Any] = single_char_strings[ch] lowerCamelCase__ : List[str] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"{round(-1 * my_fir_sum ):.1f}" ) # two len string lowerCamelCase__ : str = sum(two_char_strings.values() ) lowerCamelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCamelCase__ : str = cha + cha if sequence in two_char_strings: lowerCamelCase__ : str = two_char_strings[sequence] lowerCamelCase__ : int = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"{round(-1 * my_sec_sum ):.1f}" ) # print the difference between them print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" ) def _A (UpperCamelCase : str ) ->tuple[dict, dict]: '''simple docstring''' lowerCamelCase__ : Optional[int] = Counter() # type: ignore lowerCamelCase__ : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _A () ->List[str]: '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
96
1
"""Lazy-import ``__init__`` for the MT5 model family.

Fix: the original bound every value to one reused name and then passed the
never-defined ``_import_structure`` / tokenizer aliases to ``_LazyModule``,
raising ``NameError`` at import time.  The intended names are restored from
the (unscrambled) string literals in this same module.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# MT5 reuses the T5 tokenizers verbatim; fall back to dummy objects when the
# optional backend is missing so attribute access raises a helpful error.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

# Mapping of submodule name -> public symbols, consumed by _LazyModule.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    # Replace this module with a lazy proxy; tokenizer aliases stay eagerly available.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
615
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ : Optional[int] = { """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""], """tokenization_biogpt""": ["""BioGptTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : int = [ """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BioGptForCausalLM""", """BioGptForTokenClassification""", """BioGptForSequenceClassification""", """BioGptModel""", """BioGptPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __magic_name__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
615
1
"""Unit tests for the repository's ``utils/get_test_info.py`` helpers.

Fixes: ``git_repo_path`` and the two test-file constants were collapsed into
reassignments of one name (leaving ``git_repo_path`` / the file paths the
methods use undefined), and all three test methods shared the name ``__a`` so
only the last one survived.  Distinct names restore all three tests.
"""
import os
import sys
import unittest

# Make the repository's ``utils`` directory importable (three levels up from this file).
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class lowerCAmelCase_(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        """Each *ModelTest class maps to its *ModelTester class."""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        """Each model class maps to the list of *ModelTest classes covering it."""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        """Each model class maps to the list of *ModelTester classes covering it."""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
677
"""Convert a fairseq Hubert checkpoint to the HuggingFace Transformers format.

Fixes: all four functions were named ``__magic_name__`` while the call sites
used ``set_recursively`` / ``recursively_load_weights`` / ``load_conv_layer``
/ ``convert_hubert_checkpoint``; every parameter list reused one duplicate
name (a SyntaxError); and the ``WavaVeca*`` class names do not exist in
transformers (restored to ``Wav2Vec2*``).
"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF module path ("*" is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the target module to overwrite
    (weight / weight_g / weight_v / bias, or the module's own ``.data``).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of `fairseq_model` into `hf_model`, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned checkpoints wrap the encoder under ``hubert.``.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv-feature-extractor tensor; type_id 0 = conv, 2 = layer norm."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Load the fairseq checkpoint, copy its weights into a HF model, and save it."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,  # NOTE(review): flag values were scrambled in the source — confirm against upstream
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,  # NOTE(review): same caveat as do_lower_case above
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
677
1
"""Bilateral filtering of a grayscale image (edge-preserving smoothing).

Fixes: all five functions were named ``__a`` while the code called
``vec_gaussian`` / ``get_slice`` / ``get_gauss_kernel`` / ``bilateral_filter``
/ ``parse_args``; the spatial kernel was computed into a discarded local
instead of ``arr[i, j]``; and ``cva`` / ``np.uinta`` do not exist (they are
``cv2`` / ``np.uint8``).  OpenCV is now imported only inside the ``__main__``
guard so the pure-NumPy functions are importable without it.
"""
import math
import sys

import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Element-wise zero-mean Gaussian of `img` with the given variance."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side `kernel_size` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Spatial Gaussian kernel: Gaussian of each cell's distance from the centre."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter to `img`; border pixels are left at zero."""
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity weights: Gaussian of the difference to the centre pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga


def parse_args(args: list) -> tuple:
    """Parse [filename, spatial_variance, intensity_variance, kernel_size] with defaults.

    The kernel size is rounded up to the next odd number so the window has a centre.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    # OpenCV is only needed when run as a script; import it lazily here.
    import cv2

    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
593
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_, UpperCAmelCase_= FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_, UpperCAmelCase_= FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_= controlnet_params UpperCAmelCase_= """bird""" UpperCAmelCase_= jax.device_count() UpperCAmelCase_= pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase_= load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) UpperCAmelCase_= pipe.prepare_image_inputs([canny_image] * num_samples ) UpperCAmelCase_= jax.random.PRNGKey(0 ) UpperCAmelCase_= jax.random.split(__UpperCAmelCase , jax.device_count() ) UpperCAmelCase_= replicate(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase_= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) 
UpperCAmelCase_= images[0, 253:256, 253:256, -1] UpperCAmelCase_= jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase_= jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_, UpperCAmelCase_= FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_, UpperCAmelCase_= FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_= controlnet_params UpperCAmelCase_= """Chef in the kitchen""" UpperCAmelCase_= jax.device_count() UpperCAmelCase_= pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase_= load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) UpperCAmelCase_= pipe.prepare_image_inputs([pose_image] * num_samples ) UpperCAmelCase_= jax.random.PRNGKey(0 ) UpperCAmelCase_= jax.random.split(__UpperCAmelCase , jax.device_count() ) UpperCAmelCase_= replicate(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase_= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase_= images[0, 253:256, 253:256, -1] UpperCAmelCase_= jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase_= jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 
0.302_734]] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
593
1
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __magic_name__ = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ): @staticmethod def A_ ( lowerCamelCase ): snake_case__ = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=lowerCamelCase , required=lowerCamelCase , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=lowerCamelCase , required=lowerCamelCase , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=lowerCamelCase , required=lowerCamelCase , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=lowerCamelCase , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=lowerCamelCase , default=lowerCamelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=lowerCamelCase ) def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): snake_case__ = logging.get_logger("transformers-cli/converting" ) self._logger.info(F"""Loading model {model_type}""" ) snake_case__ = model_type snake_case__ = tf_checkpoint snake_case__ = pytorch_dump_output snake_case__ = config snake_case__ = finetuning_task_name def A_ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from 
..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): snake_case__ = self._tf_checkpoint snake_case__ = "" else: snake_case__ = self._tf_checkpoint snake_case__ = "" convert_transfo_xl_checkpoint_to_pytorch( lowerCamelCase , self._config , self._pytorch_dump_output , lowerCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
530
from __future__ import annotations


def SCREAMING_SNAKE_CASE__(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law to compute the one quantity given as 0.

    Exactly one of voltage (V), current (A) and resistance (Ohm) must be 0;
    the other two are used to compute it via V = I * R.

    Returns:
        A single-entry dict mapping the computed quantity's name to its value.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    """
    # The obfuscated original declared three parameters with the same name
    # (a SyntaxError); the body's references fix the intended names.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    if resistance == 0:
        return {"resistance": voltage / current}
    # Unreachable given the count(0) guard above, kept for defensiveness.
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
530
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = {"""vocab_file""": """spiece.model"""} _UpperCAmelCase = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } _UpperCAmelCase = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } _UpperCAmelCase = """▁""" class _UpperCAmelCase ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Dict="[CLS]" , UpperCamelCase__ : Optional[int]="[SEP]" , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : Tuple="[SEP]" , UpperCamelCase__ : 
Optional[int]="<pad>" , UpperCamelCase__ : Union[str, Any]="[CLS]" , UpperCamelCase__ : Any="[MASK]" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. A = ( AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token ) A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , ) A = do_lower_case A = remove_space A = keep_accents A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case_ ) @property def UpperCamelCase ( self : Union[str, Any] ): return len(self.sp_model ) def UpperCamelCase ( self : int ): A = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): A = self.__dict__.copy() A = None return state def __setstate__( self : Dict , UpperCamelCase__ : int ): A = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ): if self.remove_space: A = """ """.join(inputs.strip().split() ) else: A = inputs A = outputs.replace('``' , '\"' ).replace('\'\'' , '\"' ) if not self.keep_accents: A = unicodedata.normalize('NFKD' , snake_case_ ) A = """""".join([c for c in outputs if not 
unicodedata.combining(snake_case_ )] ) if self.do_lower_case: A = outputs.lower() return outputs def UpperCamelCase ( self : str , UpperCamelCase__ : str ): A = self.preprocess_text(snake_case_ ) A = self.sp_model.encode(snake_case_ , out_type=snake_case_ ) A = [] for piece in pieces: if len(snake_case_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): A = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case_ , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A = cur_pieces[1:] else: A = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(snake_case_ ) else: new_pieces.append(snake_case_ ) return new_pieces def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Dict ): return self.sp_model.PieceToId(snake_case_ ) def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any ): return self.sp_model.IdToPiece(snake_case_ ) def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] ): A = [] A = """""" A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case_ ) + token A = True A = [] else: current_sub_tokens.append(snake_case_ ) A = False out_string += self.sp_model.decode(snake_case_ ) return out_string.strip() def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ ) if 
token_ids_a is not None: return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1] return [1] + ([0] * len(snake_case_ )) + [1] def UpperCamelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): if not os.path.isdir(snake_case_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A = os.path.join( snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case_ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case_ , 'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(snake_case_ ) return (out_vocab_file,)
699
lowerCamelCase_ : Tuple = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCamelCase_ : Union[str, Any] = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> float: UpperCamelCase_: Any = from_type.lower().strip("""s""" ) UpperCamelCase_: int = to_type.lower().strip("""s""" ) UpperCamelCase_: Any = UNIT_SYMBOL.get(lowerCamelCase , lowerCamelCase ) UpperCamelCase_: str = UNIT_SYMBOL.get(lowerCamelCase , lowerCamelCase ) if from_sanitized not in METRIC_CONVERSION: UpperCamelCase_: Optional[int] = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {", ".join(lowerCamelCase )}''' ) raise ValueError(lowerCamelCase ) if to_sanitized not in METRIC_CONVERSION: UpperCamelCase_: Dict = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {", ".join(lowerCamelCase )}''' ) raise ValueError(lowerCamelCase ) UpperCamelCase_: Union[str, Any] = METRIC_CONVERSION[from_sanitized] UpperCamelCase_: str = METRIC_CONVERSION[to_sanitized] UpperCamelCase_: Tuple = 1 if from_exponent > to_exponent: UpperCamelCase_: Union[str, Any] = from_exponent - to_exponent else: UpperCamelCase_: Dict = -(to_exponent - from_exponent) return value * pow(10 , lowerCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
548
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Base structure: the configuration module is always importable.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

# Tokenizer objects are registered only when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

# Modeling objects are registered only when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors _import_structure exactly.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
519
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base structure: configuration and slow tokenizer are always importable.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

# PyTorch modeling objects require `torch`.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

# TensorFlow modeling objects require `tensorflow`.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors _import_structure exactly.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
519
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCAmelCase : lowerCAmelCase_ = XGLMConfig lowerCAmelCase_ = {} lowerCAmelCase_ = "gelu" def __init__( self : List[str] , __lowercase : List[str] , __lowercase : Any=14 , __lowercase : Union[str, Any]=7 , __lowercase : Tuple=True , __lowercase : Any=True , __lowercase : int=True , __lowercase : Optional[Any]=99 , __lowercase : str=32 , __lowercase : int=2 , __lowercase : Optional[Any]=4 , __lowercase : Optional[Any]=37 , __lowercase : Tuple="gelu" , __lowercase : Tuple=0.1 , __lowercase : List[str]=0.1 , __lowercase : List[str]=512 , __lowercase : int=0.0_2 , ): """simple docstring""" __lowercase =parent __lowercase =batch_size __lowercase =seq_length __lowercase =is_training __lowercase =use_input_mask __lowercase =use_labels __lowercase =vocab_size __lowercase =d_model __lowercase =num_hidden_layers __lowercase =num_attention_heads __lowercase =ffn_dim __lowercase =activation_function __lowercase =activation_dropout __lowercase =attention_dropout __lowercase =max_position_embeddings __lowercase =initializer_range __lowercase =None __lowercase =0 __lowercase =2 __lowercase =1 def snake_case ( self : Tuple ): """simple docstring""" return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def snake_case ( self : Any ): """simple docstring""" __lowercase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , 
clip_value_max=3 ) __lowercase =None if self.use_input_mask: __lowercase =random_attention_mask([self.batch_size, self.seq_length] ) __lowercase =self.get_config() __lowercase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def snake_case ( self : List[Any] ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_snake_case , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_snake_case , ) def snake_case ( self : Any ): """simple docstring""" __lowercase =self.prepare_config_and_inputs() ( __lowercase ) =config_and_inputs __lowercase ={ '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class lowerCAmelCase ( A_ , A_ , unittest.TestCase ): lowerCAmelCase_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCAmelCase_ = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCAmelCase_ = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def snake_case ( self : Optional[Any] ): """simple docstring""" __lowercase =TFXGLMModelTester(self ) __lowercase =ConfigTester(self , config_class=_snake_case , n_embd=37 ) def snake_case ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() @slow def snake_case ( self : List[str] ): """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase 
=TFXGLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def snake_case ( self : List[str] ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class lowerCAmelCase ( unittest.TestCase ): @slow def snake_case ( self : Dict , __lowercase : Dict=True ): """simple docstring""" __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowercase =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on __lowercase =model.generate(_snake_case , do_sample=_snake_case , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , _snake_case ) @slow def snake_case ( self : str ): """simple docstring""" __lowercase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowercase =tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowercase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowercase =model.generate(_snake_case , do_sample=_snake_case , seed=[7, 0] ) __lowercase =tokenizer.decode(output_ids[0] , skip_special_tokens=_snake_case ) __lowercase =( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(_snake_case , _snake_case ) @slow def snake_case ( self : Tuple ): """simple docstring""" __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowercase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowercase ='''left''' # use different length sentences to test batching __lowercase =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] __lowercase =tokenizer(_snake_case , return_tensors='tf' , padding=_snake_case ) __lowercase =inputs['''input_ids'''] __lowercase =model.generate(input_ids=_snake_case , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowercase =tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowercase =model.generate(input_ids=_snake_case , max_new_tokens=12 ) __lowercase =tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowercase =model.generate(input_ids=_snake_case , max_new_tokens=12 ) __lowercase =tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) __lowercase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case ) __lowercase =tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case ) __lowercase =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(_snake_case , _snake_case ) self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence] )
119
"""simple docstring""" lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.3_5_5_8_1_8, } def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: lowercase__ : Optional[Any] = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {", ".join(__lowerCamelCase )}""" ) raise ValueError(__lowerCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
560
0
"""Image processor for video inputs: resize, center-crop, rescale (with an
optional offset step) and normalize every frame of every video in a batch.

Fixes vs. the original obfuscated text: the batching helper's parameter did
not match the names its body used (NameError); all six methods shared the
name ``a__`` while the class calls ``self.resize`` / ``self.center_crop`` /
``self.rescale`` / ``self.normalize`` / ``self._preprocess_image``
(method names restored from those call sites); the undefined base class and
the undefined ``make_batched`` reference are resolved; and the precedence bug
in the ``do_resize`` validation is corrected.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)
# Backward-compatible alias for the original module-level name.
__A = logger


def make_batched(videos):
    """Coerce *videos* into ``list[list[image]]`` (batch of videos of frames).

    Accepts an already-batched structure, a single video (list of frames), or
    a single image; anything else is rejected.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


# Backward-compatible alias for the original (obfuscated) function name.
lowerCamelCase_ = make_batched


class lowercase(BaseImageProcessor):
    """Video image processor.

    NOTE(review): the class name ``lowercase`` is preserved from the source
    for compatibility; the base class is restored to ``BaseImageProcessor``
    (the only processor base imported by this module).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # shortest-edge resizing: do not force a square output.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image*; ``size`` has either ``shortest_edge`` or explicit height/width."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by *scale*, optionally subtracting ``scale / 2`` first.

        NOTE(review): the ``image - (scale / 2)`` offset is preserved exactly
        as written in the source — confirm against the upstream processor
        whether the intended centering constant is correct.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured pipeline to a single frame."""
        # Parenthesized: the original `do_resize and size is None or resample is None`
        # raised whenever resample was None, even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos; returns a ``BatchFeature`` with ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
718
"""ViT model configuration and its ONNX export configuration.

Fixes vs. the original obfuscated text: both classes were named ``lowercase``
(the second clobbered the first), both inherited from the undefined name
``_lowerCamelCase`` (restored to the two bases this module actually imports:
``PretrainedConfig`` and ``OnnxConfig``), the logger and the archive map
shared the name ``__A`` (the map clobbered the logger), and both properties
of the ONNX config were named ``a__`` (restored to the ``OnnxConfig`` API
names ``inputs`` / ``atol_for_validation``).
"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """Configuration for a ViT model; defaults match google/vit-base-patch16-224."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT (single ``pixel_values`` input)."""

    # Minimum torch version supporting this export path.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the exported graph's single image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model's outputs.
        return 1e-4


# Preserve the original module-level name; its final binding was the ONNX config.
lowercase = ViTOnnxConfig
187
0
"""Tests for FocalNet models.

Reconstruction notes: the obfuscated source had duplicate parameter names
(SyntaxError), referenced the undefined name ``_A`` throughout, collapsed all
``self.<attr> = <arg>`` stores onto one local, and used undefined bases
``__lowercase``. Identifiers are restored from in-file call sites
(``FocalNetModelTester(self)``, ``self.model_tester.create_and_check_*``,
``self.check_hidden_states_output``, ``self.default_image_processor``) and
from the imported mixin names.
"""
import collections
import inspect
import unittest

from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): keyword ``path_norm`` preserved from the source —
            # confirm against FocalNetConfig whether ``patch_norm`` is meant.
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for all FocalNet heads."""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # The backbone (last class) has no input/output embeddings — skip it.
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released focalnet-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual, not assertTrue: the original two-argument assertTrue
        # treated 281 as the failure message and never compared anything.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Backbone-specific common tests."""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
37
"""RoFormer model configuration and its ONNX export configuration.

Fixes vs. the original obfuscated text: the logger and the archive map shared
the name ``SCREAMING_SNAKE_CASE`` (the map clobbered the logger), both
classes were named ``A_`` and inherited from the undefined name
``__lowercase`` (restored to the two bases this module imports), and the
``__init__`` parameters all shared the name ``_A`` (a SyntaxError; names
restored from the attribute stores in the body).
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}

# Preserve the original module-level name; its final binding was the map.
SCREAMING_SNAKE_CASE = ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP


class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model (rotary position embeddings)."""

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Embedding size defaults to the hidden size when not given explicitly.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): preserved from the source — this reassignment makes the
        # multiple-choice branch above ineffective; confirm upstream intent.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )


# Preserve the original module-level name; its final binding was the ONNX config.
A_ = RoFormerOnnxConfig
485
0
"""Lazy-import init module for the Herbert tokenizers.

Fixes vs. the original obfuscated text: the import-structure dict was bound
to a throwaway name and then clobbered by the fast-tokenizer list, leaving
``_import_structure`` (the name actually passed to ``_LazyModule``) undefined
at import time; the constructed lazy module was also assigned to a local name
instead of being installed in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fast tokenizer requires the `tokenizers` package; expose it only when available.
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
495
"""The SuperGLUE benchmark metric for the `datasets` library."""

# sklearn has no `fa_score`; the obfuscation renamed `f1_score`. Alias it so
# the internal (obfuscated-style) name keeps working.
from sklearn.metrics import f1_score as fa_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions exactly matching the labels (numpy arrays)."""
    # Original def had two parameters both named `lowercase_` (SyntaxError)
    # and a body referencing the undefined `preds`/`labels`.
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Accuracy plus F1 with the given averaging mode (`fa_avg`)."""
    # `fa_avg` is the keyword `_compute` passes (fa_avg="macro" for 'cb').
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """Per-question macro-F1, answer-level F1 and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match for a question: every answer of that question correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """SuperGLUE metric: dispatches to the right scorer per subset.

    Method names `_info` / `_compute` are the hooks the `datasets.Metric`
    API calls; the original obfuscated all three methods to one name.
    """

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc use nested dict features, which the numpy
            # formatter cannot handle.
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema, depending on the configured subset."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
495
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor __magic_name__ : Optional[Any] = logging.get_logger(__name__) class __snake_case (lowerCamelCase ): def __init__( self: Tuple , *A_: Union[str, Any] , **A_: Optional[int] ): warnings.warn( """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use OwlViTImageProcessor instead.""" , A_ , ) super().__init__(*A_ , **A_ )
281
"""simple docstring""" from collections.abc import Callable def a_ ( lowercase__ :Callable[[float], float], lowercase__ :float, lowercase__ :float ): __lowerCamelCase = a __lowerCamelCase = b if function(lowercase__ ) == 0: # one of the a or b is a root for the function return a elif function(lowercase__ ) == 0: return b elif ( function(lowercase__ ) * function(lowercase__ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("""could not find root in given interval.""" ) else: __lowerCamelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(lowercase__ ) == 0: return mid elif function(lowercase__ ) * function(lowercase__ ) < 0: __lowerCamelCase = mid else: __lowerCamelCase = mid __lowerCamelCase = start + (end - start) / 2.0 return mid def a_ ( lowercase__ :float ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
281
1
"""Lazy import structure for the ALBERT model family."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. The original rebound a
# throwaway variable in every optional-dependency branch instead of adding
# keys here, and `_LazyModule` below referenced the undefined
# `_import_structure`.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module; the original bound it to a local name only.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
711
"""Formatter registry used by `Dataset.set_format`."""

# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

# format_type -> Formatter class
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# alias -> canonical format_type
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# alias -> error to raise when the backend is not installed
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register `formatter_cls` under `format_type` and its aliases.

    The obfuscated original collapsed the dict-subscript assignments into
    plain local assignments, so nothing was ever registered.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Record the error to raise when `format_type` (or an alias) is requested
    but the backing library is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias ('np', 'pt', ...) to its canonical format type."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type`.

    Raises the stored unavailability error for known-but-uninstalled
    backends, and ValueError for unknown format types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
39
0
"""DDIM image-to-image pipeline: noises an input image up to an intermediate
timestep, then denoises it back (noise comparative analysis)."""

from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


# Deterministic 256x256 RGB preprocessing into [-1, 1] tensors; `preprocess`
# below referenced this via the (previously undefined) name `trans`.
trans = transforms.Compose(
    [
        transforms.Resize((2_56, 2_56)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a tensor / PIL image / list of PIL images into a batched tensor."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class lowerCAmelCase_(DiffusionPipeline):
    """Pipeline around a UNet + DDIM scheduler. Parameter names in `__call__`
    are restored from the attribute usage in the bodies (the original
    declared every parameter under one obfuscated name — a SyntaxError)."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that `strength` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the timestep tail to run for the given strength."""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Noise the preprocessed image up to `timestep` and return latents."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 5_0,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
582
"""Tests for the Whisper feature extractor."""

import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# Shared module-level RNG; `floats_list` referenced this name while the
# original bound the Random instance to a throwaway obfuscated name.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random (shape[0] x shape[1]) nested list of floats in [0, scale)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Config holder used by the test class below (which referenced this name
    while both classes were obfuscated to the same identifier)."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    # Attribute name is what the mixin expects; original bound it to `a_`.
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(2_00, (feature_extractor.n_samples + 5_00), 2_00)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(1_00, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 30_00))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
582
1
"""simple docstring""" def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __SCREAMING_SNAKE_CASE = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" __SCREAMING_SNAKE_CASE = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""Convert a T5X SwitchTransformers checkpoint into a PyTorch `transformers` checkpoint.

NOTE(review): requires `flax` and the T5X checkpoint utilities (`tax.checkpoints`)
at runtime; neither ships with this repository.
"""
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# Key fragments rewritten when mapping T5X parameter names to `transformers` names.
# Should not include what is already done by the `from_pt` argument.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename flat T5X checkpoint keys (in place) to the `transformers` naming scheme."""
    # 1. Rewrite layer indices and the encoder/decoder block structure.
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_regex = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings.
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # Relative-attention bias tables are stored transposed in T5X.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one checkpoint entry per expert.
    # Fix: the original bound the weights to a mangled local but then read
    # the undefined name `expert_weihts`, and the replacement target string
    # had been garbled to the literal 'nested fstring'.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict


# Gin hyperparameter names -> SwitchTransformersConfig attribute names.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Build a SwitchTransformersConfig from a T5X gin config file."""
    # `regex` mirrors the stdlib `re` API; kept as in the original.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    # The activation list is written as e.g. "...activations = ('gelu',)".
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X checkpoint, rename its weights, and save a PyTorch checkpoint."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model.
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # Fix: argparse stores this as `switch_t5x_checkpoint_path`; the original
    # read the nonexistent attribute `switch_tax_checkpoint_path`.
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
402
"""Lazy import structure for the data2vec model family.

Fixes: the import structure and the backend lists were assigned to throwaway
names instead of being attached to `_import_structure` (which the trailing
`_LazyModule` call reads); the TF section now uses the same
OptionalDependencyNotAvailable guard as the torch section; TYPE_CHECKING
imports are made consistent with the string keys.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
80
0
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    A number P is pentagonal iff (1 + sqrt(1 + 24P)) / 6 is an integer.
    Fix: both functions were defined under the same mangled name while the
    code below called them as ``is_pentagonal`` / ``solution``.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the smallest difference D = |Pj - Pk| such that
    Pj + Pk and D are both pentagonal, searching the first ``limit`` pentagonal
    numbers. Returns -1 if no pair is found within the limit.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
520
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho, or None.

    Fix: the function was defined under a mangled name while the CLI below
    called it as ``pollard_rho``.

    Raises:
        ValueError: if ``num`` < 2.
    """
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use
    # ``f(x) = (x**2 + C) % num`` so that ``C`` (= step) can vary per attempt,
    # since the algorithm's success depends on the seed and random function.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Positions within the cycle-detection logic (Floyd's tortoise & hare).
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Both eventually enter a cycle whose length divides ``num``; once
            # they coincide modulo a factor ``p``, the difference shares a
            # common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                if divisor == num:
                    # The divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # This attempt failed: reseed deterministically from the hare's
        # position (as in Brent's variant) and change the random function.
        seed = hare
        step += 1

    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
520
1
import requests
from bs4 import BeautifulSoup  # fix: original imported from the nonexistent module "bsa"


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price for ``symbol`` from Yahoo Finance India.

    Fix: the function was defined under a mangled name while the CLI below
    called it as ``stock_price``.

    NOTE(review): relies on Yahoo's page markup (the hashed CSS class below);
    brittle against site redesigns — confirm the selector still matches.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
551
# Number of characters in the alphabet, used as the hash base.
alphabet_size = 256
# Modulus to hash a string.
modulus = 1000003
# Fix: both constants were assigned to the same mangled name while the
# function body referenced ``alphabet_size`` and ``modulus``; the two
# functions were likewise defined under mangled names but called as
# ``rabin_karp`` / ``test_rabin_karp``.


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Rabin-Karp rolling hash)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        # Equal hashes are confirmed with a direct comparison to rule out
        # collisions.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Smoke tests covering hits, misses, and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
551
1
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar A = TypeVar("""T""") class _UpperCamelCase ( Generic[T] ): """simple docstring""" snake_case_ = 4_2 # Cache store of keys snake_case_ = 4_2 # References of the keys in cache snake_case_ = 1_0 # Maximum capacity of cache def __init__( self : Optional[Any] , snake_case : int ) -> None: '''simple docstring''' __magic_name__ : Any = deque() __magic_name__ : Dict = set() if not n: __magic_name__ : Dict = sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: __magic_name__ : int = n def _UpperCAmelCase ( self : List[Any] , snake_case : T ) -> None: '''simple docstring''' if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: __magic_name__ : Tuple = self.dq_store.pop() self.key_reference.remove(snake_case ) else: self.dq_store.remove(snake_case ) self.dq_store.appendleft(snake_case ) self.key_reference.add(snake_case ) def _UpperCAmelCase ( self : Optional[int] ) -> None: '''simple docstring''' for k in self.dq_store: print(snake_case ) def __repr__( self : int ) -> str: '''simple docstring''' return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() A = LRUCache(4) lru_cache.refer("""A""") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("""A""") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
721
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging A = logging.get_logger(__name__) A = { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json""" ), } class _UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" snake_case_ = 'xlm-prophetnet' snake_case_ = ['past_key_values'] snake_case_ = { 'num_attention_heads': 'num_encoder_attention_heads', } def __init__( self : Tuple , snake_case : Optional[float] = 0.1 , snake_case : Optional[Union[str, Callable]] = "gelu" , snake_case : Optional[int] = 3_0522 , snake_case : Optional[int] = 1024 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[float] = 0.1 , snake_case : Optional[float] = 0.1 , snake_case : Optional[int] = 512 , snake_case : Optional[float] = 0.02 , snake_case : Optional[bool] = True , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 2 , snake_case : Optional[int] = 32 , snake_case : Optional[int] = 128 , snake_case : Optional[bool] = False , snake_case : Optional[float] = 0.0 , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 1 , snake_case : Optional[int] = 2 , **snake_case : List[str] , ) -> str: '''simple docstring''' __magic_name__ : List[str] = vocab_size __magic_name__ : Optional[int] = hidden_size __magic_name__ : Any = encoder_ffn_dim __magic_name__ : str = num_encoder_layers __magic_name__ : List[str] = num_encoder_attention_heads __magic_name__ : Dict = decoder_ffn_dim __magic_name__ : int = num_decoder_layers __magic_name__ : str = num_decoder_attention_heads __magic_name__ : Tuple = max_position_embeddings __magic_name__ : 
Optional[int] = init_std # Normal(0, this parameter) __magic_name__ : Optional[int] = activation_function # parameters for xlmprophetnet __magic_name__ : int = ngram __magic_name__ : List[Any] = num_buckets __magic_name__ : int = relative_max_distance __magic_name__ : List[str] = disable_ngram_loss __magic_name__ : Union[str, Any] = eps # 3 Types of Dropout __magic_name__ : Tuple = attention_dropout __magic_name__ : List[Any] = activation_dropout __magic_name__ : Optional[int] = dropout __magic_name__ : Dict = use_cache super().__init__( pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , add_cross_attention=snake_case , decoder_start_token_id=snake_case , **snake_case , ) @property def _UpperCAmelCase ( self : Union[str, Any] ) -> int: '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def _UpperCAmelCase ( self : List[Any] , snake_case : List[Any] ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
147
0
"""Tests for the Wav2Vec2 feature extractor.

Fix: the obfuscated original gave ``floats_list`` duplicate parameter names,
assigned tester state to a throwaway local instead of ``self.*``, and defined
the classes under mangled names while referencing the real ones.
"""
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-range nested list with the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class WavaVecaFeatureExtractionTester(unittest.TestCase):
    """Holds the hyperparameters shared by the feature-extraction tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans
        # [min_seq_length, max_seq_length].
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # NOTE(review): index [0] here (not [1]) matches the original code;
            # it looks like it was meant to check the padded tail of sample 1.
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
324
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge to `destination_vertex` with weight 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph supporting 0-1 BFS shortest-path queries.

    Fixed: the obfuscated original defined `size`, `add_edge` and
    `get_shortest_path` all under one name (`__magic_name__`), so only the
    last definition survived; the intended names are restored here.
    """

    def __init__(self, size: int) -> None:
        # One outgoing-edge list per vertex.
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of `vertex`."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge.

        Raises:
            ValueError: if `weight` is not 0 or 1, or `to_vertex` is out of range.
        """
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """Return the shortest distance from `start_vertex` to `finish_vertex`.

        0-1 BFS: 0-weight edges are pushed to the front of the deque and
        1-weight edges to the back, giving Dijkstra-like behavior in O(V + E).

        Raises:
            ValueError: if `finish_vertex` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if the destination already has an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
324
1
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int: if exponent == 1: return base if exponent % 2 == 0: snake_case__ : Dict = _modexpt(A__ , exponent // 2 , A__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(A__ , exponent - 1 , A__ )) % modulo_value def UpperCamelCase__ ( A__ = 1777 , A__ = 1855 , A__ = 8 ) -> int: snake_case__ : Tuple = base for _ in range(1 , A__ ): snake_case__ : Any = _modexpt(A__ , A__ , 10**digits ) return result if __name__ == "__main__": print(F'''{solution() = }''')
699
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key.

    Fixed: the obfuscated original duplicated both parameter names
    (a SyntaxError) and erased the loop index from the gate calls
    (`circ.x(A__)` instead of `circ.x(index)`); the intended names are
    restored here.

    Args:
        key_len: desired key length in bits.
        seed: RNG seed for reproducibility (also seeds the simulator).

    Returns:
        A string of '0'/'1' of length `key_len`.
    """
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key,
    # so we prepare more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum circuit to simulate BB84.
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits: X for state 1, H for Hadamard basis.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits in his randomly chosen bases.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # One shot suffices: the key is deterministic for a fixed seed.
    sim = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Keep only the measurement results where Alice and Bob chose
    # the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(
            alice_basis, bob_basis, result
        )
        if alice_basis_bit == bob_basis_bit
    )

    # Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
699
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Marker SentencePiece uses for word-initial pieces.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer: a SentencePiece BPE model whose pieces are mapped
    through a reduced monolingual (fairseq-style) vocabulary from ``dict.txt``.

    Fixed: the obfuscated original duplicated every parameter name in
    ``__init__`` and ``get_special_tokens_mask`` (a SyntaxError) and replaced
    the ``SPIECE_UNDERLINE`` constant in ``convert_tokens_to_string`` with the
    wrong name; the intended identifiers are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab.
        # Keep the order of special tokens for backward compatibility.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; serialize the proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ):
        """Single: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ):
        """BARTpho does not use token types; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to an id, falling back to the unk id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline back into spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the SentencePiece model and the monolingual vocab to disk."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(
            self.vocab_file
        ):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
521
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis of `outputs`.

    Restored name: the obfuscated original renamed this helper while
    `postprocess` still called it as `softmax`, which would raise NameError.
    """
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Sequence-pair classification pipeline.

    Restored the Pipeline hook names (`_sanitize_parameters`, `preprocess`,
    `_forward`, `postprocess`) — the base class dispatches to these exact
    names — and `config.id2label`, which the obfuscator had mangled to
    `idalabel`.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional second sentence to preprocess().
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
521
1
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(
    PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase
):
    """Fast tests for the IF inpainting super-resolution pipeline.

    Restored: the obfuscated original named every method `A_` (so only the
    last one survived) and all class attributes `UpperCAmelCase_`; the
    tester-mixin attribute/method names expected by PipelineTesterMixin are
    reinstated here.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # NOTE(review): the obfuscator mangled this to `test_save_load_floataa`;
        # the PipelineTesterMixin method is `test_save_load_float16`.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
705
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)


# NOTE(review): line breaks inside the long string constants below were lost
# in the collapsed dump and have been reconstructed; the textual content is
# preserved.
_CITATION = """\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat and Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
"""

_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column  Type    Description
1   Document ID This is a variation on the document filename
2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3   Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9   Word sense  This is the word sense of the word in Column 3.
10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identifies the spans representing various named entities.
12:N    Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N   Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""

_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one,
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting 'keep_singletons=False', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the 'NP_only' option,
        the scorer will only evaluate the resolution of NPs.
    min_span: By setting 'min_span', the scorer reports the results based on automatically
        detected minimum spans. Minimum spans are determined using the MINA algorithm.

Returns:
    'mentions': mentions
    'muc': MUC metric [Vilain et al, 1995]
    'bcub': B-cubed [Bagga and Baldwin, 1998]
    'ceafe': CEAFe [Luo et al., 2005]
    'lea': LEA [Moosavi and Strube, 2016]
    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric('coval')
    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',
    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',
    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',
    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',
    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)     -',
    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *      -']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""


def get_coref_infos(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    """Parse key/system CoNLL lines into coval's per-document coref structures.

    Restored name: the obfuscated dump defined this, `evaluate` and
    `check_gold_parse_annotation` all as `_a`, so only the last survived
    while callers still used the original names.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each coval metric over the documents and collect recall/precision/F1.

    Also computes the averaged CoNLL score when MUC, B-cubed and CEAFe were
    all requested.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {fa * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    """Return True if the key lines carry a gold parse column (column 6 != '-')."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """Coreference evaluation metric wrapping the CoVal scorer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
157
0
from string import ascii_uppercase

# Map "10" -> "A", "11" -> "B", ..., "35" -> "Z" (ord("A") == 65 == 10 + 55).
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its representation in `base`.

    Restored signature: the obfuscated original duplicated both parameter
    names (a SyntaxError) and lost the function name still referenced by the
    self-check in ``__main__``.

    Args:
        num: non-negative integer to convert.
        base: target base, 2..36 (digits above 9 use A..Z).

    Raises:
        TypeError: if `num` is a float, or `base` is a str or float.
        ValueError: if `num` is negative or `base` is outside 2..36.
    """
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digits above 9 are written as letters.
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            # Digits were accumulated least-significant first; reverse them.
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
503
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = """▁""" lowercase = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} lowercase = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } lowercase = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } lowercase = { """ernie-m-base""": 5_1_4, """ernie-m-large""": 5_1_4, } lowercase = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class __lowercase ( A ): '''simple docstring''' _A : List[str] = ["input_ids"] _A : List[Any] = VOCAB_FILES_NAMES _A : int = PRETRAINED_INIT_CONFIGURATION _A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Any = PRETRAINED_VOCAB_FILES_MAP _A : Any = RESOURCE_FILES_NAMES def __init__( self : int , _a : str , _a : Any=None , _a : Any=False , _a : int="utf8" , _a : Tuple="[UNK]" , _a : int="[SEP]" , _a : List[str]="[PAD]" , _a : List[Any]="[CLS]" , _a : List[Any]="[MASK]" , _a : Optional[Dict[str, Any]] = None , **_a : Dict , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , vocab_file=_a , encoding=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) UpperCamelCase__ = do_lower_case UpperCamelCase__ = sentencepiece_model_ckpt UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCamelCase__ = self.load_vocab(filepath=_a ) else: UpperCamelCase__ = {self.sp_model.id_to_piece(_a ): id for id in range(self.sp_model.get_piece_size() )} UpperCamelCase__ = {v: k for k, v in self.vocab.items()} def A_ ( self : Union[str, Any] , _a : Optional[Any] ): if text is None: return None UpperCamelCase__ = self.tokenize(_a ) UpperCamelCase__ , UpperCamelCase__ = '''''', [] for i, ch in enumerate(_a ): if ch in self.SP_CHAR_MAPPING: UpperCamelCase__ = self.SP_CHAR_MAPPING.get(_a ) else: UpperCamelCase__ = unicodedata.normalize('''NFKC''' , _a ) if self.is_whitespace(_a ): continue normalized_text += ch char_mapping.extend([i] * len(_a ) ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = normalized_text, [], 0 if self.do_lower_case: UpperCamelCase__ = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCamelCase__ = token[1:] UpperCamelCase__ = text[offset:].index(_a ) + offset UpperCamelCase__ = start + len(_a ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCamelCase__ = end return token_mapping @property def A_ ( self : Dict ): return len(self.vocab ) def A_ ( self : int ): return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : List[Any] ): UpperCamelCase__ = self.__dict__.copy() UpperCamelCase__ = None return state def __setstate__( self : Any , _a : Any ): UpperCamelCase__ = d # for backward compatibility if not hasattr(self , 
'''sp_model_kwargs''' ): UpperCamelCase__ = {} UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def A_ ( self : Dict , _a : Any ): return "".join((self.SP_CHAR_MAPPING.get(_a , _a ) for c in text) ) def A_ ( self : Any , _a : Dict , _a : Optional[Any]=False , _a : str=64 , _a : List[str]=0.1 ): if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCamelCase__ = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCamelCase__ = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCamelCase__ = self.sp_model_kwargs.get('''nbest_size''' ) if not enable_sampling: UpperCamelCase__ = self.sp_model.EncodeAsPieces(_a ) else: UpperCamelCase__ = self.sp_model.SampleEncodeAsPieces(_a , _a , _a ) UpperCamelCase__ = [] for pi, piece in enumerate(_a ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_a ) and pi != 0: new_pieces.append(_a ) continue else: continue UpperCamelCase__ = 0 for i, chunk in enumerate(_a ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_a ) or self.is_punct(_a ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_a ) UpperCamelCase__ = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCamelCase__ = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCamelCase__ = i if len(_a ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def A_ ( self : List[str] , _a : Optional[Any] ): UpperCamelCase__ = ''''''.join(_a ).replace(_a , ''' ''' ).strip() return out_string def A_ ( self : Optional[int] , _a : Optional[Any] ): UpperCamelCase__ = self.convert_ids_to_tokens(_a ) UpperCamelCase__ = ''''''.join(_a 
).replace(_a , ''' ''' ).strip() return out_string def A_ ( self : Optional[Any] , _a : Tuple ): return self.vocab.get(_a , self.vocab.get(self.unk_token ) ) def A_ ( self : List[Any] , _a : Tuple ): return self.reverse_vocab.get(_a , self.unk_token ) def A_ ( self : Optional[int] , _a : Optional[int] , _a : Optional[int]=None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] UpperCamelCase__ = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def A_ ( self : Optional[Any] , _a : Optional[int] , _a : str=None ): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def A_ ( self : Any , _a : Tuple , _a : Tuple=None , _a : Any=False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] def A_ ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ): # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(_a ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_a ) + 1) + [1] * (len(_a ) + 3) def A_ ( self : Optional[int] , _a : Optional[int] ): if "\u4e00" <= char <= "\u9fff": return True return False def A_ ( self : List[str] , _a : List[str] ): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def A_ ( self : Dict , _a : Any ): if char in ",;:.?!~,;:。?!《》【】": return True return False def A_ ( self : int , _a : Dict ): if char == " " or char 
== "\t" or char == "\n" or char == "\r": return True if len(_a ) == 1: UpperCamelCase__ = unicodedata.category(_a ) if cat == "Zs": return True return False def A_ ( self : Any , _a : Optional[Any] ): UpperCamelCase__ = {} with io.open(_a , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(_a ): UpperCamelCase__ = line.rstrip('''\n''' ) UpperCamelCase__ = int(_a ) return token_to_idx def A_ ( self : Union[str, Any] , _a : str , _a : Optional[str] = None ): UpperCamelCase__ = 0 if os.path.isdir(_a ): UpperCamelCase__ = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCamelCase__ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(_a , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _a : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCamelCase__ = token_index writer.write(token + '''\n''' ) index += 1 UpperCamelCase__ = os.path.join(_a , '''sentencepiece.bpe.model''' ) with open(_a , '''wb''' ) as fi: UpperCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_a ) return (vocab_file,)
240
0
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase__ ( __lowerCamelCase : int ): '''simple docstring''' _UpperCAmelCase : int =int(number**0.5 ) return number == sq * sq def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ): '''simple docstring''' _UpperCAmelCase : int =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _UpperCAmelCase : int =x_den * y_den * z_den _UpperCAmelCase : int =gcd(__lowerCamelCase , __lowerCamelCase ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase__ ( __lowerCamelCase : int = 3_5 ): '''simple docstring''' _UpperCAmelCase : set =set() _UpperCAmelCase : int _UpperCAmelCase : Fraction =Fraction(0 ) _UpperCAmelCase : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _UpperCAmelCase : Dict =x_num * y_den + x_den * y_num _UpperCAmelCase : str =x_den * y_den _UpperCAmelCase : str =gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase : List[str] =add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 _UpperCAmelCase : Any =( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _UpperCAmelCase : Optional[Any] =x_den * x_den * y_den * y_den if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): _UpperCAmelCase : Union[str, Any] =int(sqrt(__lowerCamelCase ) ) _UpperCAmelCase : Union[str, Any] =int(sqrt(__lowerCamelCase ) ) _UpperCAmelCase : int =gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase : Tuple =add_three( __lowerCamelCase , 
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=-1 _UpperCAmelCase : str =x_num * y_num _UpperCAmelCase : Union[str, Any] =x_den * y_num + x_num * y_den _UpperCAmelCase : Optional[int] =gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase : List[str] =add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) # n=2 _UpperCAmelCase : Any =x_num * x_num * y_num * y_num _UpperCAmelCase : Any =( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ): _UpperCAmelCase : List[Any] =int(sqrt(__lowerCamelCase ) ) _UpperCAmelCase : Dict =int(sqrt(__lowerCamelCase ) ) _UpperCAmelCase : Tuple =gcd(__lowerCamelCase , __lowerCamelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase : Tuple =add_three( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) unique_s.add(__lowerCamelCase ) for num, den in unique_s: total += Fraction(__lowerCamelCase , __lowerCamelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
716
"""DPR model configuration (names restored from a mangled transcription)."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    A BERT-style backbone configuration, plus ``projection_dim`` for an
    optional final projection of the encoder output (0 = no projection).
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ) -> None:
        # pad_token_id is consumed by the base class; remaining kwargs pass through.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
331
0
"""Electrical conductivity relation sigma = q * n * mu: solve for the
missing one of (conductivity, electron concentration, mobility)."""

from __future__ import annotations

ELECTRON_CHARGE = 1.6_0_2_1E-1_9  # units = C


def lowerCAmelCase_(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve sigma = q * n * mu for whichever argument is passed as 0.

    Exactly one of the three arguments must be 0; that zero marks the
    quantity to compute from the other two.

    Returns:
        A ``(quantity_name, value)`` tuple for the computed quantity.

    Raises:
        ValueError: If not exactly one argument is 0, or any is negative.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __lowerCamelCase : Optional[Any] = None __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Any = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __lowerCamelCase : List[str] = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": 
"""https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } __lowerCamelCase : List[Any] = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } __lowerCamelCase : List[str] = """▁""" class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = AlbertTokenizer def __init__( self : List[Any] , __A : Optional[int]=None , __A : Any=None , __A : Optional[Any]=True , __A : List[Any]=True , __A : Tuple=False , __A : str="[CLS]" , __A : int="[SEP]" , __A : Optional[int]="<unk>" , __A : List[str]="[SEP]" , __A : Optional[Any]="<pad>" , __A : Union[str, Any]="[CLS]" , __A : Optional[Any]="[MASK]" , **__A : Optional[int] , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
snake_case__ : Optional[Any] = ( AddedToken(__A , lstrip=__A , rstrip=__A , normalized=__A ) if isinstance(__A , __A ) else mask_token ) super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) snake_case__ : Any = do_lower_case snake_case__ : int = remove_space snake_case__ : List[Any] = keep_accents snake_case__ : str = vocab_file snake_case__ : int = False if not self.vocab_file else True def _lowercase ( self : Union[str, Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : str = [self.sep_token_id] snake_case__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
297
0
"""simple docstring""" import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : int = DebertaTokenizer A__ : List[str] = True A__ : Optional[Any] = DebertaTokenizerFast def __UpperCAmelCase ( self : str ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] _snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _snake_case = {'''unk_token''': '''[UNK]'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any] ): """simple docstring""" _snake_case = '''lower newer''' _snake_case = '''lower newer''' return input_text, output_text def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case 
= self.get_tokenizer() _snake_case = '''lower newer''' _snake_case = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _snake_case = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) _snake_case = tokens + [tokenizer.unk_token] _snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.get_tokenizer() _snake_case = tokenizer('''Hello''' , '''World''' ) _snake_case = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) _snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCamelCase ) _snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCamelCase ) _snake_case = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) _snake_case = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) _snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) _snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: _snake_case = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) _snake_case = [ '''ALBERT: A Lite BERT 
for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] _snake_case = tokenizer(__lowerCamelCase , padding=__lowerCamelCase ) _snake_case = [tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) for seq in encoding['''input_ids''']] # fmt: off _snake_case = { '''input_ids''': [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on _snake_case = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowerCamelCase ) for expected, decoded in zip(__lowerCamelCase , __lowerCamelCase ): self.assertEqual(__lowerCamelCase , __lowerCamelCase )
717
"""simple docstring""" def snake_case ( ) -> Tuple: _snake_case = 0 for i in range(1 , 1001 ): total += i**i return str(lowerCAmelCase_ )[-10:] if __name__ == "__main__": print(solution())
404
0
"""Term-frequency / inverse-document-frequency helpers (names restored
from a mangled transcription in which all four functions collided)."""

import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive whole-word occurrences of ``term`` in ``document``."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing ``term``, total documents).

    Documents are the newline-separated sections of ``corpus``; matching is
    case-insensitive substring containment after stripping punctuation.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df) rounded to 3 places.

    With ``smoothing`` use the smoothed form 1 + log10(n / (1 + df)),
    which is defined even when ``df`` is 0.

    Raises:
        ValueError: If ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: If ``df`` is 0 and smoothing is off.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return the tf-idf score tf * idf, rounded to 3 decimal places."""
    return round(tf * idf, 3)
399
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of ``line`` ("" for blank/flush lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into blocks whose first line sits exactly at ``indent_level``.

    Lines before ``start_prompt`` (if given) form the first block; lines from
    ``end_prompt`` on (if given) form the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort ``objects``: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sort the object names inside one `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one __init__.py; return True if it would change."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run sort_imports on every __init__.py under PATH_TO_DIFFUSERS; raise on diffs in check mode."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
297
0
"""Slow integration test for the Flax MT5 conditional-generation model."""
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Score "Hi I am" given "Hello there" and compare with a reference mT5 score."""
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        # Teacher forcing: shift the labels right to build decoder inputs.
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Convert mean cross-entropy back to a total sequence log-likelihood.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
65
"""Tests for the MRA model family (config plumbing, output shapes, slow integration)."""
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    """Builds a tiny MraConfig plus random inputs and checks model output shapes."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocabulary than the unit-test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Each (batch, seq) input is replicated across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
65
1
"""Lazy import structure for the LayoutLMv2 model package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Always-available submodules; optional ones are appended below.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
409
"""Project Euler 43: sum of 0-9 pandigital numbers with the substring divisibility property."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` has the PE43 substring divisibility property.

    For digits d1..d10: d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, d4d5d6 % 5 == 0,
    and the next three-digit windows are divisible by 7, 11, 13, 17.
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0..n-1 pandigital numbers satisfying the property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
140
0
"""Lazy import structure for the MegatronBERT model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch only loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
711
"""Compute Catalan numbers iteratively using C(n) = C(n-1) * (4n - 2) / (n + 1)."""


def catalan(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed: catalan(1) == 1).

    >>> catalan(1)
    1
    >>> catalan(5)
    14

    Raises:
        TypeError: if ``number`` is not an integer.
        ValueError: if ``number`` is less than 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        # Division is always exact here, so floor division keeps ints.
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
628
0
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are 0, otherwise 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the NOR truth table for all four input combinations."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
45
"""Learning-rate schedule factories built on torch LambdaLR."""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names accepted by :func:`get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant learning rate (multiplier 1 forever)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Linear warmup to the base LR, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multipliers from a rule string like ``"1:10,0.1:20,0.01"``.

    Each ``step:value`` pair applies ``value`` until ``step``; the trailing
    bare number is the multiplier used after the last boundary.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        rules_dict[steps] = float(lr_str)
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup then cosine decay over ``num_cycles`` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup then polynomial decay from the optimizer's initial LR down to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: dispatch to the schedule named by ``name``.

    Raises ValueError when a schedule needs ``num_warmup_steps`` or
    ``num_training_steps`` and it was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
670
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> list of public names it exports.
# Configuration is always importable; vision / torch submodules are registered
# only when their optional dependencies are installed.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so that the heavy
    # torch / vision imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
709
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix whose cells count up from 1.

    A negative size is taken as its absolute value; zero falls back to 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (i.e. 90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return a new matrix with rows and columns swapped."""
    return [list(column) for column in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its row order reversed."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each individual row reversed."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
364
0
from ...configuration_utils import PretrainedConfig


_A : Dict = {
    'google/tapas-base-finetuned-sqa': (
        'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wtq': (
        'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wikisql-supervised': (
        'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-tabfact': (
        'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
    ),
}


class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for TAPAS models.

    Holds the BERT-style encoder hyperparameters plus the fine-tuning
    hyperparameters for the table question-answering heads. All arguments
    are stored as instance attributes of the same name.
    """

    # PretrainedConfig dispatches on this identifier when auto-loading configs.
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int dict keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
315
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs/inputs and checks model outputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
315
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs on CPU in a test."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic image/mask/prompt inputs for the fast test."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Release GPU memory between integration tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 tolerates a much looser match than fp32.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
704
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    """Load the `all_results.json` written by an example run into a dict.

    Raises ValueError when the file does not exist so a silently failed run
    is reported instead of returning empty metrics.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue_with_cmd_args(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
115
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowerCAmelCase = logging.getLogger(__name__) @dataclass class __UpperCamelCase : _UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _UpperCAmelCase = field( default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _UpperCAmelCase = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) _UpperCAmelCase = field( default=a__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _UpperCAmelCase = field(default=a__ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __UpperCamelCase : _UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) _UpperCAmelCase = field( default=a__ , metadata={"help": "Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."} , ) _UpperCAmelCase = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _UpperCAmelCase = field( default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowerCamelCase__ ( ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) _lowerCAmelCase : Dict = import_module('tasks' ) try: _lowerCAmelCase : Any = getattr(_lowerCamelCase , model_args.task_type ) _lowerCAmelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _lowerCAmelCase : str = token_classification_task.get_labels(data_args.labels ) _lowerCAmelCase : Dict[int, str] = dict(enumerate(_lowerCamelCase ) ) _lowerCAmelCase : Optional[Any] = len(_lowerCamelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid={label: i for i, label in enumerate(_lowerCamelCase )} , cache_dir=model_args.cache_dir , ) _lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _lowerCAmelCase : Optional[Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets _lowerCAmelCase : str = ( TokenClassificationDataset( token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _lowerCAmelCase : Any = ( TokenClassificationDataset( token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(_lowerCamelCase , _lowerCamelCase ) -> Tuple[List[int], List[int]]: _lowerCAmelCase : List[Any] = np.argmax(_lowerCamelCase , axis=2 ) _lowerCAmelCase, _lowerCAmelCase : List[str] = preds.shape _lowerCAmelCase : int = [[] for _ in range(_lowerCamelCase )] _lowerCAmelCase : Any = [[] for _ in range(_lowerCamelCase )] for i in range(_lowerCamelCase ): for j in range(_lowerCamelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) 
preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_lowerCamelCase ) -> Dict: _lowerCAmelCase, _lowerCAmelCase : Optional[int] = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_lowerCamelCase , _lowerCamelCase ), "precision": precision_score(_lowerCamelCase , _lowerCamelCase ), "recall": recall_score(_lowerCamelCase , _lowerCamelCase ), "f1": fa_score(_lowerCamelCase , _lowerCamelCase ), } # Data collator _lowerCAmelCase : List[str] = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _lowerCAmelCase : Optional[Any] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , data_collator=_lowerCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _lowerCAmelCase : str = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _lowerCAmelCase : Optional[int] = trainer.evaluate() _lowerCAmelCase : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_lowerCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _lowerCamelCase , _lowerCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(_lowerCamelCase ) # Predict if training_args.do_predict: _lowerCAmelCase : List[str] = TokenClassificationDataset( token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , 
tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = trainer.predict(_lowerCamelCase ) _lowerCAmelCase, _lowerCAmelCase : List[Any] = align_predictions(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : List[str] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(_lowerCamelCase , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , _lowerCamelCase , _lowerCamelCase ) writer.write('%s = %s\n' % (key, value) ) # Save predictions _lowerCAmelCase : Optional[Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_lowerCamelCase , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return results def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' main() if __name__ == "__main__": main()
259
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( a__ , unittest.TestCase ): _UpperCAmelCase = ShapEImgaImgPipeline _UpperCAmelCase = ["image"] _UpperCAmelCase = ["image"] _UpperCAmelCase = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] _UpperCAmelCase = False @property def __lowerCamelCase ( self ): '''simple docstring''' return 32 @property def __lowerCamelCase ( self ): '''simple docstring''' return 32 @property def __lowerCamelCase ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): '''simple docstring''' return 8 @property def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : str = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,) _lowerCAmelCase : Dict = CLIPVisionModel(_A ) return model @property def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = CLIPImageProcessor( crop_size=224 ,do_center_crop=_A ,do_normalize=_A ,do_resize=_A ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,) return image_processor @property def __lowerCamelCase ( self ): '''simple docstring''' 
torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } _lowerCAmelCase : int = PriorTransformer(**_A ) return model @property def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Union[str, Any] = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } _lowerCAmelCase : List[str] = ShapERenderer(**_A ) return model def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : int = self.dummy_prior _lowerCAmelCase : Union[str, Any] = self.dummy_image_encoder _lowerCAmelCase : List[Any] = self.dummy_image_processor _lowerCAmelCase : List[str] = self.dummy_renderer _lowerCAmelCase : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' ,num_train_timesteps=1024 ,prediction_type='sample' ,use_karras_sigmas=_A ,clip_sample=_A ,clip_sample_range=1.0 ,) _lowerCAmelCase : List[str] = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self ,_A ,_A=0 ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('mps' ): _lowerCAmelCase : Dict = torch.manual_seed(_A ) else: _lowerCAmelCase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A ) _lowerCAmelCase : int = { 'image': input_image, 
'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : str = 'cpu' _lowerCAmelCase : List[str] = self.get_dummy_components() _lowerCAmelCase : Optional[int] = self.pipeline_class(**_A ) _lowerCAmelCase : Tuple = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) ) _lowerCAmelCase : Optional[int] = output.images[0] _lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _lowerCAmelCase : Union[str, Any] = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = torch_device == 'cpu' _lowerCAmelCase : List[Any] = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=_A ,relax_max_difference=_A ,) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.get_dummy_components() _lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_A ) _lowerCAmelCase : List[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : Tuple = 1 _lowerCAmelCase : str = 2 _lowerCAmelCase : Any = self.get_dummy_inputs(_A ) for key in inputs.keys(): if key in self.batch_params: _lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]] _lowerCAmelCase : Tuple = pipe(**_A ,num_images_per_prompt=_A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple 
docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) _lowerCAmelCase : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) _lowerCAmelCase : Optional[int] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) _lowerCAmelCase : Union[str, Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(0 ) _lowerCAmelCase : Tuple = pipe( _A ,generator=_A ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_A ,_A )
259
1
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A_ = logging.get_logger(__name__) class _snake_case ( _a ): _A : List[str] = ["""pixel_values"""] def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Dict = True ,SCREAMING_SNAKE_CASE__ : Dict = None ,SCREAMING_SNAKE_CASE__ : Optional[Any] = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE__ : Optional[Any] = True ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : Any = True ,SCREAMING_SNAKE_CASE__ : str = 1 / 255 ,SCREAMING_SNAKE_CASE__ : List[Any] = True ,SCREAMING_SNAKE_CASE__ : str = None ,SCREAMING_SNAKE_CASE__ : int = None ,**SCREAMING_SNAKE_CASE__ : Dict ,): super().__init__(**snake_case_ ) SCREAMING_SNAKE_CASE:List[str] = size if size is not None else {"shortest_edge": 256} SCREAMING_SNAKE_CASE:Optional[int] = get_size_dict(snake_case_ ,default_to_square=snake_case_ ) SCREAMING_SNAKE_CASE:Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} SCREAMING_SNAKE_CASE:Dict = get_size_dict(snake_case_ ,param_name="crop_size" ) SCREAMING_SNAKE_CASE:Any = do_resize SCREAMING_SNAKE_CASE:Tuple = size SCREAMING_SNAKE_CASE:int = resample SCREAMING_SNAKE_CASE:Union[str, Any] = do_center_crop SCREAMING_SNAKE_CASE:int = crop_size SCREAMING_SNAKE_CASE:List[Any] = do_rescale SCREAMING_SNAKE_CASE:int = rescale_factor SCREAMING_SNAKE_CASE:Tuple = do_normalize SCREAMING_SNAKE_CASE:List[str] = image_mean if image_mean is not None else 
IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE:Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Dict = None ,**SCREAMING_SNAKE_CASE__ : int ,): SCREAMING_SNAKE_CASE:Tuple = get_size_dict(snake_case_ ,default_to_square=snake_case_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) SCREAMING_SNAKE_CASE:Optional[int] = get_resize_output_image_size(snake_case_ ,size=size["shortest_edge"] ,default_to_square=snake_case_ ) return resize(snake_case_ ,size=snake_case_ ,resample=snake_case_ ,data_format=snake_case_ ,**snake_case_ ) def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,): SCREAMING_SNAKE_CASE:Union[str, Any] = get_size_dict(snake_case_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(snake_case_ ,size=(size["height"], size["width"]) ,data_format=snake_case_ ,**snake_case_ ) def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = None ,**SCREAMING_SNAKE_CASE__ : int ): return rescale(snake_case_ ,scale=snake_case_ ,data_format=snake_case_ ,**snake_case_ ) def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,): return normalize(snake_case_ ,mean=snake_case_ ,std=snake_case_ ,data_format=snake_case_ ,**snake_case_ ) def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : Optional[Any] = None ,SCREAMING_SNAKE_CASE__ : List[str] = None ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = None ,SCREAMING_SNAKE_CASE__ : List[str] = None ,SCREAMING_SNAKE_CASE__ : Dict = None ,SCREAMING_SNAKE_CASE__ : List[Any] = None ,SCREAMING_SNAKE_CASE__ : Tuple = None ,SCREAMING_SNAKE_CASE__ : Dict = None ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : Dict = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Dict ,): SCREAMING_SNAKE_CASE:List[str] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE:str = size if size is not None else self.size SCREAMING_SNAKE_CASE:Tuple = get_size_dict(snake_case_ ,default_to_square=snake_case_ ) SCREAMING_SNAKE_CASE:List[str] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE:Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE:Tuple = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE:Optional[int] = get_size_dict(snake_case_ ,param_name="crop_size" ) SCREAMING_SNAKE_CASE:Any = do_rescale if do_rescale is not 
None else self.do_rescale SCREAMING_SNAKE_CASE:int = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE:Any = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE:Optional[Any] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE:int = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE:str = make_list_of_images(snake_case_ ) if not valid_images(snake_case_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE:Union[str, Any] = [to_numpy_array(snake_case_ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE:str = [self.resize(image=snake_case_ ,size=snake_case_ ,resample=snake_case_ ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE:Optional[int] = [self.center_crop(image=snake_case_ ,size=snake_case_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE:List[Any] = [self.rescale(image=snake_case_ ,scale=snake_case_ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE:Union[str, Any] = [self.normalize(image=snake_case_ ,mean=snake_case_ ,std=snake_case_ ) for image in images] SCREAMING_SNAKE_CASE:Optional[int] = [to_channel_dimension_format(snake_case_ ,snake_case_ ) for image in images] SCREAMING_SNAKE_CASE:Optional[int] = {"pixel_values": images} return BatchFeature(data=snake_case_ ,tensor_type=snake_case_ ) def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str = None ): SCREAMING_SNAKE_CASE:Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(snake_case_ ) != len(snake_case_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(snake_case_ ): SCREAMING_SNAKE_CASE:Union[str, Any] = target_sizes.numpy() SCREAMING_SNAKE_CASE:Tuple = [] for idx in range(len(snake_case_ ) ): SCREAMING_SNAKE_CASE:Optional[int] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="bilinear" ,align_corners=snake_case_ ) SCREAMING_SNAKE_CASE:int = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(snake_case_ ) else: SCREAMING_SNAKE_CASE:Tuple = logits.argmax(dim=1 ) SCREAMING_SNAKE_CASE:str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
720
'''simple docstring''' import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): A_ = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: A_ = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def A_ ( snake_case ): SCREAMING_SNAKE_CASE:Any = (images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE:Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE:str = numpy_to_pil(snake_case ) return images def A_ ( snake_case ): if images.ndim == 3: SCREAMING_SNAKE_CASE:List[str] = images[None, ...] SCREAMING_SNAKE_CASE:List[Any] = (images * 255).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images SCREAMING_SNAKE_CASE:Any = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: SCREAMING_SNAKE_CASE:int = [Image.fromarray(snake_case ) for image in images] return pil_images
465
0
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def a_ (__A , __A , __A ) -> List[str]: """simple docstring""" __a : Any = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] __a : List[Any] = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } __a : int = f'{src_lang}-{tgt_lang}' __a : Tuple = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, 
return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n 
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(__A , exist_ok=__A ) __a : Any = os.path.join(__A , "README.md" ) print(f'Generating {path}' ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(__A ) # make sure we are under the root of the project UpperCAmelCase__ = Path(__file__).resolve().parent.parent.parent UpperCAmelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = model_name.split('''-''') UpperCAmelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
351
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class snake_case_ ( unittest.TestCase ): """simple docstring""" def __init__(self: Optional[int] , __UpperCAmelCase: str , __UpperCAmelCase: List[str]=13 , __UpperCAmelCase: Any=7 , __UpperCAmelCase: List[str]=True , __UpperCAmelCase: Optional[int]=True , __UpperCAmelCase: Dict=True , __UpperCAmelCase: Optional[Any]=True , __UpperCAmelCase: Optional[int]=99 , __UpperCAmelCase: Optional[Any]=32 , __UpperCAmelCase: int=5 , __UpperCAmelCase: Dict=4 , __UpperCAmelCase: Optional[int]=37 , __UpperCAmelCase: int="gelu" , __UpperCAmelCase: Tuple=0.1 , __UpperCAmelCase: Any=0.1 , __UpperCAmelCase: Union[str, Any]=512 , __UpperCAmelCase: Optional[Any]=16 , __UpperCAmelCase: List[Any]=2 , __UpperCAmelCase: str=0.02 , __UpperCAmelCase: int=4 , ) -> str: '''simple docstring''' __a : Tuple = parent __a : int = batch_size __a : Optional[int] = seq_length __a : List[Any] = is_training __a : Tuple = use_attention_mask __a : Optional[int] = use_token_type_ids __a : Tuple = use_labels __a : str = vocab_size __a : Union[str, Any] = hidden_size __a : List[str] = num_hidden_layers __a : Optional[int] = num_attention_heads __a : Any = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Tuple = max_position_embeddings __a : Optional[int] = type_vocab_size __a : Tuple = type_sequence_label_size __a : List[Any] = initializer_range __a : int = num_choices def 
UpperCAmelCase__ (self: Tuple ) -> List[Any]: '''simple docstring''' __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = None if self.use_attention_mask: __a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Optional[int] = None if self.use_token_type_ids: __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Dict = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ (self: Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __a : Union[str, Any] = self.prepare_config_and_inputs() __a , __a , __a , __a : Dict = config_and_inputs __a : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def UpperCAmelCase__ (self: Dict ) -> str: '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a , __a , __a , __a : Tuple = config_and_inputs __a : int = True __a : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class snake_case_ ( __UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case__ = True snake_case__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, 
FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ (self: Dict ) -> Union[str, Any]: '''simple docstring''' __a : Tuple = FlaxRobertaModelTester(self ) @slow def UpperCAmelCase__ (self: Optional[Any] ) -> List[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: __a : int = model_class_name.from_pretrained("roberta-base" , from_pt=__UpperCAmelCase ) __a : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCAmelCase )
351
1
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=[30, 30] , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=None , UpperCamelCase_=8 , UpperCamelCase_=10 , ): __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = is_training __magic_name__ = use_labels __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = scope __magic_name__ = n_targets __magic_name__ = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + 
num_detection_tokens __magic_name__ = (image_size[1] // patch_size) * (image_size[0] // patch_size) __magic_name__ = num_patches + 1 + self.num_detection_tokens def lowerCAmelCase__ ( self ): __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) __magic_name__ = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __magic_name__ = [] for i in range(self.batch_size ): __magic_name__ = {} __magic_name__ = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase_ ) __magic_name__ = torch.rand(self.n_targets , 4 , device=UpperCamelCase_ ) labels.append(UpperCamelCase_ ) __magic_name__ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __magic_name__ = YolosModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __magic_name__ = model(UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __magic_name__ = YolosForObjectDetection(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __magic_name__ = model(pixel_values=UpperCamelCase_ ) __magic_name__ = 
model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) __magic_name__ = model(pixel_values=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def lowerCAmelCase__ ( self ): __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): _lowerCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () _lowerCamelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ): __magic_name__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __magic_name__ = [] for i in range(self.model_tester.batch_size ): __magic_name__ = {} __magic_name__ = torch.ones( size=(self.model_tester.n_targets,) , device=UpperCamelCase_ , dtype=torch.long ) __magic_name__ = torch.ones( self.model_tester.n_targets , 4 , device=UpperCamelCase_ , dtype=torch.float ) labels.append(UpperCamelCase_ ) __magic_name__ = labels return inputs_dict def lowerCAmelCase__ ( self ): __magic_name__ = YolosModelTester(self ) __magic_name__ = ConfigTester(self , 
config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): # YOLOS does not use inputs_embeds pass def lowerCAmelCase__ ( self ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCAmelCase__ ( self ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase_ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = True # in YOLOS, the seq_len is different __magic_name__ = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __magic_name__ = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del 
inputs_dict["output_attentions"] __magic_name__ = True __magic_name__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __magic_name__ = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __magic_name__ = len(UpperCamelCase_ ) # Check attention is always last and order is fine __magic_name__ = True __magic_name__ = True __magic_name__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __magic_name__ = 1 self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) ) __magic_name__ = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowerCAmelCase__ ( self ): def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __magic_name__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # YOLOS has a different seq_length __magic_name__ = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: __magic_name__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = YolosModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowercase ( ) -> List[Any]: __magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def lowerCAmelCase__ ( self ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self ): __magic_name__ = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase_ ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): __magic_name__ = model(inputs.pixel_values ) # verify outputs __magic_name__ = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __magic_name__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCamelCase_ , ) __magic_name__ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) 
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) # verify postprocessing __magic_name__ = image_processor.post_process_object_detection( UpperCamelCase_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] __magic_name__ = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase_ ) __magic_name__ = [75, 75, 17, 63, 17] __magic_name__ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase_ ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase_ , atol=1E-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase_ ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase_ ) )
702
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _lowercase ( __UpperCAmelCase , unittest.TestCase ): _lowerCamelCase = ReformerTokenizer _lowerCamelCase = ReformerTokenizerFast _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = True def lowerCAmelCase__ ( self ): super().setUp() __magic_name__ = ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self ): __magic_name__ = '''<s>''' __magic_name__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(UpperCamelCase_ ) , 1000 ) def lowerCAmelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCAmelCase__ ( self ): if not self.test_rust_tokenizer: return __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_rust_tokenizer() __magic_name__ = '''I was born in 92000, and this is falsé.''' __magic_name__ = tokenizer.tokenize(UpperCamelCase_ ) __magic_name__ = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) __magic_name__ = 
rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = self.get_rust_tokenizer() __magic_name__ = tokenizer.encode(UpperCamelCase_ ) __magic_name__ = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) # Simple input __magic_name__ = '''This is a simple input''' __magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2'''] __magic_name__ = ('''This is a simple input''', '''This is a pair''') __magic_name__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) def lowerCAmelCase__ ( self ): pass def lowerCAmelCase__ ( self ): __magic_name__ = 
ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __magic_name__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [285, 46, 10, 170, 382] , ) __magic_name__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __magic_name__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __magic_name__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCAmelCase__ ( self ): return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def lowerCAmelCase__ ( self ): __magic_name__ = '''Hello World!''' __magic_name__ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): __magic_name__ = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! 
: - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __magic_name__ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @require_torch @slow def lowerCAmelCase__ ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence __magic_name__ = list(self.big_tokenizer.get_vocab().keys() )[:10] __magic_name__ = ''' '''.join(UpperCamelCase_ ) __magic_name__ = self.big_tokenizer.encode_plus(UpperCamelCase_ , return_tensors='''pt''' ) __magic_name__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) __magic_name__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) __magic_name__ = encoded_sequence['''input_ids'''].shape __magic_name__ = ReformerModel(UpperCamelCase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCamelCase_ ) model(**UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self ): # fmt: off __magic_name__ = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': 
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 __magic_name__ = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCamelCase_ , sequences=UpperCamelCase_ , )
190
0
'''simple docstring''' from __future__ import annotations def __snake_case ( SCREAMING_SNAKE_CASE_ : list[int] ) -> bool: """simple docstring""" return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
51
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowerCamelCase : Tuple ='''true''' def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=82 , __lowerCAmelCase=16 ) -> Optional[int]: set_seed(42 ) UpperCamelCase__ : Union[str, Any] = RegressionModel() UpperCamelCase__ : Optional[Any] = deepcopy(__lowerCAmelCase ) UpperCamelCase__ : str = RegressionDataset(length=__lowerCAmelCase ) UpperCamelCase__ : int = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase ) model.to(accelerator.device ) UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict: UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) UpperCamelCase__ : Optional[Any] = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(__lowerCAmelCase ): UpperCamelCase__ : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs with accelerator.main_process_first(): UpperCamelCase__ : Optional[int] = dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) UpperCamelCase__ : str = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__lowerCAmelCase ): if use_longest: return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) 
return DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=16 ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: UpperCamelCase__ : int = Accelerator(dispatch_batches=__lowerCAmelCase , split_batches=__lowerCAmelCase ) UpperCamelCase__ : int = get_dataloader(__lowerCAmelCase , not dispatch_batches ) UpperCamelCase__ : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=__lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: UpperCamelCase__ : List[Any] = [] for batch in dataloader: UpperCamelCase__ , UpperCamelCase__ : str = batch.values() with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(__lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ : Tuple = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) UpperCamelCase__ , UpperCamelCase__ : Any = [], [] for logit, targ in logits_and_targets: logits.append(__lowerCAmelCase ) targs.append(__lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ : int = torch.cat(__lowerCAmelCase ), torch.cat(__lowerCAmelCase ) return logits, targs def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=82 , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=16 ) -> Any: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = get_basic_setup(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ : List[Any] = generate_predictions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) assert ( len(__lowerCAmelCase ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n 
Actual: {len(__lowerCAmelCase )}' def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = False , __lowerCAmelCase = False ) -> List[Any]: UpperCamelCase__ : str = evaluate.load("glue" , "mrpc" ) UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = get_mrpc_setup(__lowerCAmelCase , __lowerCAmelCase ) # First do baseline UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : int = setup["no"] model.to(__lowerCAmelCase ) model.eval() for batch in dataloader: batch.to(__lowerCAmelCase ) with torch.inference_mode(): UpperCamelCase__ : Optional[Any] = model(**__lowerCAmelCase ) UpperCamelCase__ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__lowerCAmelCase , references=batch["labels"] ) UpperCamelCase__ : Optional[int] = metric.compute() # Then do distributed UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): UpperCamelCase__ : Optional[Any] = model(**__lowerCAmelCase ) UpperCamelCase__ : Optional[Any] = outputs.logits.argmax(dim=-1 ) UpperCamelCase__ : Dict = batch["labels"] UpperCamelCase__ , UpperCamelCase__ : List[Any] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__lowerCAmelCase , references=__lowerCAmelCase ) UpperCamelCase__ : str = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: UpperCamelCase__ : Optional[int] = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on 
the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(__lowerCAmelCase , __lowerCAmelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: UpperCamelCase__ : Optional[int] = Accelerator(split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(__lowerCAmelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) UpperCamelCase__ : Union[str, Any] = Accelerator() test_torch_metrics(__lowerCAmelCase , 512 ) accelerator.state._reset_state() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
228
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { 'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['BloomTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST', 'BloomForCausalLM', 'BloomModel', 'BloomPreTrainedModel', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
719
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_bigcode""" snake_case_ = ["""past_key_values"""] snake_case_ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions SCREAMING_SNAKE_CASE__ : Dict =n_embd SCREAMING_SNAKE_CASE__ : Dict =n_layer SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head SCREAMING_SNAKE_CASE__ : List[str] =n_inner SCREAMING_SNAKE_CASE__ : List[str] =activation_function SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon SCREAMING_SNAKE_CASE__ : List[str] =initializer_range SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa 
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa SCREAMING_SNAKE_CASE__ : Dict =multi_query SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
665
0
from dataclasses import dataclass, field from typing import Optional @dataclass class lowerCamelCase_ : a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) a__ = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) a__ = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) a__ = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) a__ = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) a__ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) a__ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) a__ = field( default=1_00_00 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) a__ = field(default=2e-4 , metadata={'''help''': '''Learning rate fo training.'''} ) a__ = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) a__ = field( default=7_50 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) a__ = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) a__ = field(default=5_00_00 , metadata={'''help''': '''Maximum number of training steps.'''} ) a__ = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) a__ = field(default=10_24 , metadata={'''help''': '''Sequence lengths used for training.'''} ) a__ = field(default=1 , metadata={'''help''': '''Training seed.'''} ) a__ = field( default=10_24 , metadata={'''help''': '''Interval to save checkpoints. 
Measured as number of forward passes not training steps.'''} , ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class lowerCamelCase_ : a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) a__ = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) a__ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) a__ = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) a__ = field(default=10_24 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) a__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class lowerCamelCase_ : a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''The number of human-eval tasks to run. 
If not included all tasks are evaluated.'''} , ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) a__ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) a__ = field(default=2_56 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) a__ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) a__ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) a__ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) a__ = field( default=2_00 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) a__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) a__ = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) a__ = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) a__ = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class lowerCamelCase_ : a__ = field( default=lowerCamelCase , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) a__ = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) a__ = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) a__ = field( default=10_00_00 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) a__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) a__ = field( default=10_00 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) a__ = field( default=1_00 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) a__ = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) a__ = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) a__ = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) a__ = field( default=lowerCamelCase , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) a__ = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class lowerCamelCase_ : a__ = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) a__ = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) a__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) a__ = field(default=20_00_00 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) a__ = field( default=3_27_68 , metadata={'''help''': 
'''Number of examples to train the tokenizer on.'''} ) a__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class lowerCamelCase_ : a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) a__ = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) a__ = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class lowerCamelCase_ : a__ = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) a__ = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) a__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) a__ = field(default=lowerCamelCase , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase__ = { "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
117
0
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def _lowercase ( lowerCamelCase__ : Dict[str, torch.Tensor] ): _a = [] _a = [] _a = [] for rt in rc.restypes: _a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) _a = {name: i for i, name in enumerate(lowerCamelCase__ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) _a = torch.tensor( lowerCamelCase__, dtype=torch.intaa, device=protein["aatype"].device, ) _a = torch.tensor( lowerCamelCase__, dtype=torch.intaa, device=protein["aatype"].device, ) _a = torch.tensor( lowerCamelCase__, dtype=torch.floataa, device=protein["aatype"].device, ) _a = protein["aatype"].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein _a = restype_atomaa_to_atomaa[protein_aatype] _a = restype_atomaa_mask[protein_aatype] _a = residx_atomaa_mask _a = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back _a = restype_atomaa_to_atomaa[protein_aatype] _a = residx_atomaa_to_atomaa.long() # create the corresponding mask _a = torch.zeros([21, 37], dtype=torch.floataa, device=protein["aatype"].device ) for restype, restype_letter in enumerate(rc.restypes ): _a = rc.restype_atoa[restype_letter] _a = rc.residue_atoms[restype_name] for atom_name in atom_names: _a = rc.atom_order[atom_name] _a = 1 _a = restype_atomaa_mask[protein_aatype] _a = residx_atomaa_mask return protein def _lowercase ( lowerCamelCase__ : Dict[str, torch.Tensor] ): _a = tree_map(lambda lowerCamelCase__ : torch.tensor(lowerCamelCase__, device=batch["aatype"].device ), lowerCamelCase__, np.ndarray ) _a = tensor_tree_map(lambda lowerCamelCase__ : np.array(lowerCamelCase__ ), make_atomaa_masks(lowerCamelCase__ ) ) return out
715
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : int = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Dict = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
691
0
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'owlvit_text_model' def __init__( self , lowercase=49_408 , lowercase=512 , lowercase=2_048 , lowercase=12 , lowercase=8 , lowercase=16 , lowercase="quick_gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , lowercase=0 , lowercase=49_406 , lowercase=49_407 , **lowercase , ) -> Tuple: super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = intermediate_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = max_position_embeddings lowerCAmelCase = hidden_act lowerCAmelCase = layer_norm_eps lowerCAmelCase = attention_dropout lowerCAmelCase = initializer_range lowerCAmelCase = initializer_factor @classmethod def _snake_case ( cls , lowercase , **lowercase ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowercase ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCAmelCase = config_dict["""text_config"""] if "model_type" in 
config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'owlvit_vision_model' def __init__( self , lowercase=768 , lowercase=3_072 , lowercase=12 , lowercase=12 , lowercase=3 , lowercase=768 , lowercase=32 , lowercase="quick_gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , **lowercase , ) -> int: super().__init__(**lowercase ) lowerCAmelCase = hidden_size lowerCAmelCase = intermediate_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = hidden_act lowerCAmelCase = layer_norm_eps lowerCAmelCase = attention_dropout lowerCAmelCase = initializer_range lowerCAmelCase = initializer_factor @classmethod def _snake_case ( cls , lowercase , **lowercase ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowercase ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCAmelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'owlvit' _SCREAMING_SNAKE_CASE = True def __init__( self , lowercase=None , lowercase=None , lowercase=512 , lowercase=2.6_592 , lowercase=True , **lowercase , ) -> Tuple: super().__init__(**lowercase ) if text_config is None: lowerCAmelCase = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: lowerCAmelCase = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) lowerCAmelCase = OwlViTTextConfig(**lowercase ) lowerCAmelCase = OwlViTVisionConfig(**lowercase ) lowerCAmelCase = projection_dim lowerCAmelCase = logit_scale_init_value lowerCAmelCase = return_dict lowerCAmelCase = 1.0 @classmethod def _snake_case ( cls , lowercase , **lowercase ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowercase ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(lowercase , **lowercase ) if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) @classmethod def _snake_case ( cls , lowercase , lowercase , **lowercase ) -> Optional[Any]: lowerCAmelCase = {} lowerCAmelCase = text_config lowerCAmelCase = vision_config return cls.from_dict(lowercase , **lowercase ) def _snake_case ( self ) -> Any: lowerCAmelCase = copy.deepcopy(self.__dict__ ) lowerCAmelCase = self.text_config.to_dict() lowerCAmelCase = self.vision_config.to_dict() lowerCAmelCase = self.__class__.model_type return output class lowercase ( _UpperCAmelCase ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _snake_case ( self ) -> float: return 1e-4 def _snake_case ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = None , ) -> Mapping[str, Any]: lowerCAmelCase = super().generate_dummy_inputs( processor.tokenizer , batch_size=lowercase , seq_length=lowercase , framework=lowercase ) lowerCAmelCase = super().generate_dummy_inputs( processor.image_processor , batch_size=lowercase , framework=lowercase ) return {**text_input_dict, **image_input_dict} @property def _snake_case ( self ) -> int: return 14
532
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) SCREAMING_SNAKE_CASE__ = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class lowercase ( unittest.TestCase ): def _snake_case ( self , lowercase , lowercase , lowercase = None , lowercase = None ) -> Dict: lowerCAmelCase = None lowerCAmelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) ) lowerCAmelCase = os.path.abspath("""examples""" ) for item in os.listdir(lowercase ): if item not in EXCLUDE_EXAMPLES: lowerCAmelCase = os.path.join(lowercase , lowercase ) if os.path.isfile(lowercase ) and ".py" in item_path: with self.subTest( tested_script=lowercase , feature_script=lowercase , tested_section="""main()""" if parser_only else """training_function()""" , ): lowerCAmelCase = compare_against_test( os.path.join(lowercase , lowercase ) , lowercase , lowercase , lowercase ) lowerCAmelCase = """\n""".join(lowercase ) if special_strings is not None: for string in special_strings: lowerCAmelCase = diff.replace(lowercase , """""" ) self.assertEqual(lowercase , """""" ) def _snake_case ( self ) -> List[Any]: self.one_complete_example("""complete_nlp_example.py""" , lowercase ) self.one_complete_example("""complete_nlp_example.py""" , lowercase ) def _snake_case ( self ) -> Any: lowerCAmelCase = 
os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) ) lowerCAmelCase = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""" , lowercase , lowercase , lowercase ) self.one_complete_example("""complete_cv_example.py""" , lowercase , lowercase , lowercase ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = False @classmethod def _snake_case ( cls ) -> Optional[int]: super().setUpClass() lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = os.path.join(cls._tmpdir , """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def _snake_case ( cls ) -> Optional[int]: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _snake_case ( self ) -> str: lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) ) def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split() lowerCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) ) def _snake_case ( self ) -> List[str]: lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint 
{os.path.join(self.tmpdir , "epoch_0" )}\n '.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase ) self.assertNotIn("""epoch 0:""" , lowercase ) self.assertIn("""epoch 1:""" , lowercase ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase ) if torch.cuda.is_available(): lowerCAmelCase = torch.cuda.device_count() else: lowerCAmelCase = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""" , lowercase ) self.assertIn("""epoch 1:""" , lowercase ) else: self.assertIn("""epoch 0:""" , lowercase ) self.assertIn("""epoch 1:""" , lowercase ) @slow def _snake_case ( self ) -> Tuple: lowerCAmelCase = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ): lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase ) lowerCAmelCase = re.findall("""({.+})""" , lowercase ) lowerCAmelCase = [r for r in results if """accuracy""" in r][-1] lowerCAmelCase = ast.literal_eval(lowercase ) self.assertGreaterEqual(results["""accuracy"""] , 0.75 ) def _snake_case ( self ) -> int: lowerCAmelCase = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def _snake_case ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: lowerCAmelCase = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(lowercase , """tracking""" ) ) ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args 
+ testargs ) def _snake_case ( self ) -> int: lowerCAmelCase = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
532
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase__ : '''simple docstring''' def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=128 , _lowerCAmelCase=32 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): a =parent a =batch_size a =seq_length a =is_training a =use_input_mask a =use_token_type_ids a =use_labels a =vocab_size a =hidden_size a =num_hidden_layers a =num_attention_heads a =intermediate_size a =hidden_act a =hidden_dropout_prob a =attention_probs_dropout_prob a =max_position_embeddings a =type_vocab_size a =type_sequence_label_size a =initializer_range a =num_labels a =num_choices a =scope def lowerCAmelCase__ ( self ): a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a =None if self.use_input_mask: a 
=random_attention_mask([self.batch_size, self.seq_length] ) a =None if self.use_token_type_ids: a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a =None a =None a =None if self.use_labels: a =ids_tensor([self.batch_size] , self.type_sequence_label_size ) a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a =ids_tensor([self.batch_size] , self.num_choices ) a =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self ): ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) =self.prepare_config_and_inputs() a =True a =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =NezhaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) a =model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) a =model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): a =True a =NezhaModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =NezhaForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =NezhaForNextSentencePrediction(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase 
, _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =NezhaForPreTraining(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , next_sentence_label=_lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =NezhaForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =self.num_labels a =NezhaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =self.num_labels a =NezhaForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a =self.num_choices a =NezhaForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a =model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self ): a =self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) =config_and_inputs a ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[Any] = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Optional[Any] = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : List[str] = True def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): a 
=super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): a =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase ) a =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) return inputs_dict def lowerCAmelCase__ ( self ): a =NezhaModelTester(self ) a =ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) =self.model_tester.prepare_config_and_inputs_for_decoder() a =None self.model_tester.create_and_check_model_as_decoder( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a 
=self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @slow def lowerCAmelCase__ ( self ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a =NezhaModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @slow @require_torch_gpu def lowerCAmelCase__ ( self ): a , a =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return a =True a =model_class(config=_lowerCAmelCase ) a =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) a =torch.jit.trace( _lowerCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """bert.pt""" ) ) a =torch.jit.load(os.path.join(_lowerCAmelCase , """bert.pt""" ) , map_location=_lowerCAmelCase ) loaded(inputs_dict["""input_ids"""].to(_lowerCAmelCase ) , inputs_dict["""attention_mask"""].to(_lowerCAmelCase ) ) @require_torch class UpperCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self ): a =NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) a =torch.tensor([[0, 1, 2, 3, 4, 5]] ) a =torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0] a =torch.Size((1, 6, 768) ) self.assertEqual(output.shape , _lowerCAmelCase ) a =torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 
0.08_19, 0.05_86]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) ) @slow def lowerCAmelCase__ ( self ): a =NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) a =torch.tensor([[0, 1, 2, 3, 4, 5]] ) a =torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0] a =torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape , _lowerCAmelCase ) a =torch.tensor( [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
321
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds small ViT-MSN configs/inputs and runs shape checks for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # NOTE: the original strings were missing the f-prefix and printed the
        # placeholders literally; fixed to interpolate the actual values.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shape/attribute tests for ViT-MSN (no attention_mask / head_mask, no seq2seq)."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
321
1
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated alphabetic characters removed.

    Spaces are always kept; any other non-alphabetic character is dropped.

    >>> remove_duplicates("Hello World!!")
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        # Precedence: a space is always appended; any other character only
        # when it is alphabetic and not already seen.
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher substitution map from *key*.

    The deduplicated, uppercased key fills the first positions of the cipher
    alphabet; the remaining A-Z letters follow, skipping letters that the key
    already used.
    """
    alphabet = [chr(i + 65) for i in range(26)]  # 'A'..'Z'
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode *message* (uppercased) with *cipher_map*.

    Characters without a mapping (spaces, punctuation) pass through unchanged.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decode *message* by inverting *cipher_map*."""
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactively encipher or decipher a message with a keyword.

    Raises:
        KeyError: if the user picks an option other than 'e' or 'd'.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
383
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase_ : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ) -> Any: __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCamelCase = (image_size // patch_size) ** 2 __UpperCamelCase = num_patches + 1 def __lowercase( self ) 
-> Optional[Any]: __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = self.get_config() return config, pixel_values, labels def __lowercase( self ) -> str: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: __UpperCamelCase = TFViTModel(config=_SCREAMING_SNAKE_CASE ) __UpperCamelCase = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
__UpperCamelCase = self.image_size // 2 __UpperCamelCase = pixel_values[:, :, :image_size, :image_size] __UpperCamelCase = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) __UpperCamelCase = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: __UpperCamelCase = self.type_sequence_label_size __UpperCamelCase = TFViTForImageClassification(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. __UpperCamelCase = self.image_size // 2 __UpperCamelCase = pixel_values[:, :, :image_size, :image_size] __UpperCamelCase = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCamelCase = 1 __UpperCamelCase = TFViTForImageClassification(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCamelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self ) -> Dict: __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) 
if is_tf_available() else () UpperCAmelCase__ = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def __lowercase( self ) -> List[str]: __UpperCamelCase = TFViTModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowercase( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def __lowercase( self ) -> str: pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def __lowercase( self ) -> Tuple: pass def __lowercase( self ) -> int: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) ) def __lowercase( self ) -> Dict: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def __lowercase( self ) -> str: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def __lowercase( self ) -> str: __UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def __lowercase( self ) -> List[Any]: __UpperCamelCase = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def _a ( ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase( self ) -> List[Any]: return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def __lowercase( self ) -> Dict: __UpperCamelCase = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='tf' ) # forward pass __UpperCamelCase = model(**_SCREAMING_SNAKE_CASE ) # verify the logits __UpperCamelCase = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) __UpperCamelCase = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
383
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Optional[Any] = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCAmelCase : List[Any] = { 'gpt-neox-20b': 2048, } class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = ["""input_ids""", """attention_mask"""] def __init__( self : Any , UpperCamelCase : str=None , UpperCamelCase : str=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int="<|endoftext|>" , UpperCamelCase : Dict="<|endoftext|>" , UpperCamelCase : Tuple="<|endoftext|>" , UpperCamelCase : List[Any]=False , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__( UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space: __UpperCAmelCase : Union[str, Any] = getattr(UpperCamelCase , pre_tok_state.pop("""type""" ) ) __UpperCAmelCase : Dict = add_prefix_space __UpperCAmelCase : List[Any] = pre_tok_class(**UpperCamelCase ) __UpperCAmelCase : Any = add_prefix_space def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple 
docstring''' __UpperCAmelCase : List[str] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : "Conversation" ): '''simple docstring''' __UpperCAmelCase : Any = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] ) if len(UpperCamelCase ) > self.model_max_length: __UpperCAmelCase : str = input_ids[-self.model_max_length :] return input_ids
299
"""simple docstring""" from ...configuration_utils import PretrainedConfig UpperCAmelCase : int = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class lowerCamelCase__ ( A ): """simple docstring""" __a = """tapas""" def __init__( self : Tuple , UpperCamelCase : Dict=30_522 , UpperCamelCase : Dict=768 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : Union[str, Any]=3_072 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Optional[int]=1_024 , UpperCamelCase : Dict=[3, 256, 256, 2, 256, 256, 10] , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Union[str, Any]=1e-1_2 , UpperCamelCase : Tuple=0 , UpperCamelCase : List[str]=10.0 , UpperCamelCase : List[str]=0 , UpperCamelCase : str=1.0 , UpperCamelCase : Any=None , UpperCamelCase : Tuple=1.0 , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[str]=1.0 , UpperCamelCase : List[Any]=1.0 , UpperCamelCase : str=False , UpperCamelCase : Dict=False , UpperCamelCase : List[str]="ratio" , UpperCamelCase : Tuple=None , UpperCamelCase : Any=None , UpperCamelCase : int=64 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : int=True , UpperCamelCase : str=False , UpperCamelCase : Optional[int]=False , UpperCamelCase : List[str]=True , UpperCamelCase : int=False , UpperCamelCase : int=None , UpperCamelCase : Dict=None , **UpperCamelCase : Optional[int] , ): '''simple 
docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __UpperCAmelCase : List[str] = vocab_size __UpperCAmelCase : int = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : Tuple = num_attention_heads __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : Optional[Any] = type_vocab_sizes __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = layer_norm_eps # Fine-tuning task hyperparameters __UpperCAmelCase : Union[str, Any] = positive_label_weight __UpperCAmelCase : List[str] = num_aggregation_labels __UpperCAmelCase : Dict = aggregation_loss_weight __UpperCAmelCase : Any = use_answer_as_supervision __UpperCAmelCase : Any = answer_loss_importance __UpperCAmelCase : Any = use_normalized_answer_loss __UpperCAmelCase : Tuple = huber_loss_delta __UpperCAmelCase : List[Any] = temperature __UpperCAmelCase : str = aggregation_temperature __UpperCAmelCase : Optional[int] = use_gumbel_for_cells __UpperCAmelCase : Union[str, Any] = use_gumbel_for_aggregation __UpperCAmelCase : Tuple = average_approximation_function __UpperCAmelCase : List[Any] = cell_selection_preference __UpperCAmelCase : List[Any] = answer_loss_cutoff __UpperCAmelCase : Any = max_num_rows __UpperCAmelCase : int = max_num_columns __UpperCAmelCase : Any = average_logits_per_cell __UpperCAmelCase : int = select_one_column __UpperCAmelCase : str = allow_empty_column_selection __UpperCAmelCase : Optional[int] = init_cell_selection_weights_to_zero __UpperCAmelCase : Dict = reset_position_index_per_cell __UpperCAmelCase : List[Any] = disable_per_token_loss # Aggregation hyperparameters __UpperCAmelCase : Tuple = aggregation_labels 
__UpperCAmelCase : Union[str, Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , UpperCamelCase ): __UpperCAmelCase : int = {int(UpperCamelCase ): v for k, v in aggregation_labels.items()}
299
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , ) assert hasattr(self , '''env''' ) def __UpperCAmelCase ( self , _a ): __a = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}''' # distributed data settings __a = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , 
base_job_name=_a , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , ) def __UpperCAmelCase ( self , _a ): TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def __UpperCAmelCase ( self , _a ): __a = self.create_estimator(_a ) # run training estimator.fit() # result dataframe __a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) __a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __a = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
695
import random class _lowercase : """simple docstring""" @staticmethod def _UpperCAmelCase ( UpperCAmelCase ): '''simple docstring''' _lowercase = [ord(UpperCAmelCase ) for i in text] _lowercase = [] _lowercase = [] for i in plain: _lowercase = random.randint(1 , 300 ) _lowercase = (i + k) * k cipher.append(UpperCAmelCase ) key.append(UpperCAmelCase ) return cipher, key @staticmethod def _UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ): '''simple docstring''' _lowercase = [] for i in range(len(UpperCAmelCase ) ): _lowercase = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(UpperCAmelCase ) ) return "".join(UpperCAmelCase ) if __name__ == "__main__": A_ , A_: List[Any] = Onepad().encrypt('Hello') print(c, k) print(Onepad().decrypt(c, k))
398
0
def _UpperCAmelCase ( UpperCamelCase: int , UpperCamelCase: Optional[Any] ): """simple docstring""" __lowerCAmelCase = 0 __lowerCAmelCase = len(UpperCamelCase ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __lowerCAmelCase = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase ): return None __lowerCAmelCase = sorted_collection[point] if current_item == item: return point else: if point < left: __lowerCAmelCase = left __lowerCAmelCase = point elif point > right: __lowerCAmelCase = right __lowerCAmelCase = point else: if item < current_item: __lowerCAmelCase = point - 1 else: __lowerCAmelCase = point + 1 return None def _UpperCAmelCase ( UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict ): """simple docstring""" if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None __lowerCAmelCase = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) elif point > right: return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( UpperCamelCase , UpperCamelCase , UpperCamelCase , point - 1 ) else: return interpolation_search_by_recursion( UpperCamelCase , UpperCamelCase , point + 1 , UpperCamelCase ) def _UpperCAmelCase ( UpperCamelCase: Optional[Any] 
): """simple docstring""" if collection != sorted(UpperCamelCase ): raise ValueError("Collection must be ascending sorted" ) return True if __name__ == "__main__": import sys UpperCamelCase_ = 0 if debug == 1: UpperCamelCase_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3] try: __assert_sorted(collection) except ValueError: sys.exit("Sequence must be ascending sorted to apply interpolation search") UpperCamelCase_ = 6_7 UpperCamelCase_ = interpolation_search(collection, target) if result is not None: print(f'''{target} found at positions: {result}''') else: print("Not found")
376
"""Unit tests for the Flax generation logits processors/warpers.

NOTE(review): identifiers were mechanically obfuscated — every method is
named ``UpperCAmelCase__`` (later defs shadow earlier ones) and every local
binds to ``__lowerCAmelCase`` while later statements read the original local
names (``scores``, ``probs`` ...), so this file cannot run as written. The
surviving right-hand-side names document the original intent.
"""
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class a ( unittest.TestCase ):
    """Exercises temperature/top-k/top-p warpers, min-length and forced
    BOS/EOS processors, individually, as a processor list, and under jit.
    """

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : int ):
        """Helper: uniform probability scores of shape (batch_size, length)."""
        __lowerCAmelCase = jnp.ones((batch_size, length) ) / length
        return scores

    def UpperCAmelCase__ ( self : Dict ):
        """Temperature warping: <1 sharpens the distribution, >1 smooths it,
        and a uniform row stays uniform either way.
        """
        __lowerCAmelCase = None
        __lowerCAmelCase = 20
        __lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=snake_case__ )
        # tweak scores to not be uniform anymore
        __lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        __lowerCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        __lowerCAmelCase = jax.nn.softmax(snake_case__ , axis=-1 )
        __lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
        __lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(snake_case__ , scores.copy() , cur_len=snake_case__ ) , axis=-1 )
        __lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(snake_case__ , scores.copy() , cur_len=snake_case__ ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )

    def UpperCAmelCase__ ( self : str ):
        """Top-k warping: only the k best logits survive; min_tokens_to_keep
        overrides a smaller k.
        """
        __lowerCAmelCase = None
        __lowerCAmelCase = 10
        __lowerCAmelCase = 2
        # create ramp distribution
        __lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, vocab_size) ).copy()
        __lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
        __lowerCAmelCase = FlaxTopKLogitsWarper(3 )
        __lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        __lowerCAmelCase = 5
        __lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        __lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, length) ).copy()
        __lowerCAmelCase = top_k_warp_safety_check(snake_case__ , snake_case__ , cur_len=snake_case__ )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )

    def UpperCAmelCase__ ( self : int ):
        """Top-p (nucleus) warping: keeps the smallest prefix of tokens whose
        probability mass reaches top_p; min_tokens_to_keep is a floor.
        """
        __lowerCAmelCase = None
        __lowerCAmelCase = 10
        __lowerCAmelCase = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        __lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
        __lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
        __lowerCAmelCase = np.exp(top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        __lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
        self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        __lowerCAmelCase = np.broadcast_to(np.arange(snake_case__ )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        __lowerCAmelCase = ramp_logits[1] * 1_0_0.0
        # make sure at least 2 tokens are kept
        __lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        __lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )

    def UpperCAmelCase__ ( self : Dict ):
        """Min-length processor: EOS is masked to -inf before min_length and
        untouched after it.
        """
        __lowerCAmelCase = 20
        __lowerCAmelCase = 4
        __lowerCAmelCase = 0
        __lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
        # check that min length is applied at length 5
        __lowerCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
        __lowerCAmelCase = 5
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = min_dist_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
        # check that min length is not applied anymore at length 15
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = 15
        __lowerCAmelCase = min_dist_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertFalse(jnp.isinf(snake_case__ ).any() )

    def UpperCAmelCase__ ( self : int ):
        """Forced-BOS processor: at cur_len == 1 every score except the BOS
        token's is -inf; later steps are untouched.
        """
        __lowerCAmelCase = 20
        __lowerCAmelCase = 4
        __lowerCAmelCase = 0
        __lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
        # check that all scores are -inf except the bos_token_id score
        __lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
        __lowerCAmelCase = 1
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id shold be zero
        # check that bos_token_id is not forced if current length is greater than 1
        __lowerCAmelCase = 3
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertFalse(jnp.isinf(snake_case__ ).any() )

    def UpperCAmelCase__ ( self : str ):
        """Forced-EOS processor: when cur_len reaches max_length - 1, every
        score except the EOS token's is -inf; earlier steps are untouched.
        """
        __lowerCAmelCase = 20
        __lowerCAmelCase = 4
        __lowerCAmelCase = 0
        __lowerCAmelCase = 5
        __lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        __lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
        __lowerCAmelCase = 4
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        __lowerCAmelCase = 3
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = logits_processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        self.assertFalse(jnp.isinf(snake_case__ ).any() )

    def UpperCAmelCase__ ( self : Dict ):
        """Applying the processors one-by-one equals applying them through a
        FlaxLogitsProcessorList, and input_ids are never mutated.
        """
        __lowerCAmelCase = 4
        __lowerCAmelCase = 10
        __lowerCAmelCase = 15
        __lowerCAmelCase = 2
        __lowerCAmelCase = 1
        __lowerCAmelCase = 15
        # dummy input_ids and scores
        __lowerCAmelCase = ids_tensor((batch_size, sequence_length) , snake_case__ )
        __lowerCAmelCase = input_ids.copy()
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = scores.copy()
        # instantiate all dist processors
        __lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __lowerCAmelCase = FlaxTopKLogitsWarper(3 )
        __lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        __lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
        __lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
        __lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
        __lowerCAmelCase = 10
        # no processor list
        __lowerCAmelCase = temp_dist_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
        __lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
        __lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
        __lowerCAmelCase = min_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
        __lowerCAmelCase = bos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
        __lowerCAmelCase = eos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
        # with processor list
        __lowerCAmelCase = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        __lowerCAmelCase = processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )

    def UpperCAmelCase__ ( self : Dict ):
        """Same equivalence as above, but with both pipelines jit-compiled."""
        __lowerCAmelCase = 4
        __lowerCAmelCase = 10
        __lowerCAmelCase = 15
        __lowerCAmelCase = 2
        __lowerCAmelCase = 1
        __lowerCAmelCase = 15
        # dummy input_ids and scores
        __lowerCAmelCase = ids_tensor((batch_size, sequence_length) , snake_case__ )
        __lowerCAmelCase = input_ids.copy()
        __lowerCAmelCase = self._get_uniform_logits(snake_case__ , snake_case__ )
        __lowerCAmelCase = scores.copy()
        # instantiate all dist processors
        __lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __lowerCAmelCase = FlaxTopKLogitsWarper(3 )
        __lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        __lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=snake_case__ )
        __lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
        __lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ , eos_token_id=snake_case__ )
        __lowerCAmelCase = 10

        # no processor list
        def run_no_processor_list(snake_case__ : int , snake_case__ : Any , snake_case__ : int ):
            __lowerCAmelCase = temp_dist_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
            __lowerCAmelCase = top_k_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
            __lowerCAmelCase = top_p_warp(snake_case__ , snake_case__ , cur_len=snake_case__ )
            __lowerCAmelCase = min_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
            __lowerCAmelCase = bos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
            __lowerCAmelCase = eos_dist_proc(snake_case__ , snake_case__ , cur_len=snake_case__ )
            return scores

        # with processor list
        def run_processor_list(snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str ):
            __lowerCAmelCase = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            __lowerCAmelCase = processor(snake_case__ , snake_case__ , cur_len=snake_case__ )
            return scores

        __lowerCAmelCase = jax.jit(snake_case__ )
        __lowerCAmelCase = jax.jit(snake_case__ )
        __lowerCAmelCase = jitted_run_no_processor_list(snake_case__ , snake_case__ , snake_case__ )
        __lowerCAmelCase = jitted_run_processor_list(snake_case__ , snake_case__ , snake_case__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
376
1
class a_ : def __init__( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = [0] * size SCREAMING_SNAKE_CASE_ = [0] * size @staticmethod def A_( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def A_( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ = value while index < self.size: SCREAMING_SNAKE_CASE_ = self.get_prev(SCREAMING_SNAKE_CASE ) + 1 if current_left_border == index: SCREAMING_SNAKE_CASE_ = value else: SCREAMING_SNAKE_CASE_ = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = self.get_next(SCREAMING_SNAKE_CASE ) def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive SCREAMING_SNAKE_CASE_ = 0 while left <= right: SCREAMING_SNAKE_CASE_ = self.get_prev(SCREAMING_SNAKE_CASE ) if left <= current_left: SCREAMING_SNAKE_CASE_ = max(SCREAMING_SNAKE_CASE , self.tree[right] ) SCREAMING_SNAKE_CASE_ = current_left else: SCREAMING_SNAKE_CASE_ = max(SCREAMING_SNAKE_CASE , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
205
"""Tests for the PyTorch ViTMAE model.

NOTE(review): the original chunk was anonymized into invalid Python — every
``__init__`` parameter was named identically (a SyntaxError) and all three
classes shared one name. Identifiers below are reconstructed from the
right-hand sides that survived anonymization; verify against upstream.
"""
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model tests for ViTMAE; several are overridden because each
    forward pass samples a random mask."""

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible so PT and TF see the same noise
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test fixture."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
417
0
"""X-MOD model configuration.

NOTE(review): the anonymized original gave every ``__init__`` parameter the
same name (a SyntaxError) and both classes the same name; parameter names
below are reconstructed from the surviving assignment targets.
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration for an X-MOD model (RoBERTa-style encoder with
    per-language adapter modules)."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific options
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; multiple-choice adds a choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
34
"""Lazy import structure for the CTRL model.

Fix: the anonymizer renamed the import dict to a throwaway variable while the
final ``_LazyModule(..., _import_structure, ...)`` call still referenced
``_import_structure`` (NameError), and the torch/TF model lists were never
attached to the structure. Restored the canonical pattern.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# Only expose the PyTorch models when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# Only expose the TensorFlow models when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
34
1
"""Lazy import structure for the CLIPSeg model.

Fix: the anonymized original bound the import dict and model list to throwaway
variables while ``_LazyModule(..., _import_structure, ...)`` still referenced
``_import_structure`` (NameError); the torch model list was never attached.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

# Only expose the PyTorch models when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
101
"""BLOOM model configuration.

NOTE(review): the anonymized original gave every ``__init__`` parameter the
same name (a SyntaxError) and collapsed all locals in
``generate_dummy_inputs``; names are reconstructed from assignment targets.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional

from packaging import version


if TYPE_CHECKING:
    from ... import PreTrainedTokenizer, TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging


logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    """Configuration for a BLOOM causal language model."""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
290
0
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def lowercase_ (A : bytes , A : int ): snake_case__ : int = F'''{sampling_rate}''' snake_case__ : Union[str, Any] = '1' snake_case__ : int = 'f32le' snake_case__ : Dict = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(A , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: snake_case__ : Optional[Any] = ffmpeg_process.communicate(A ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error snake_case__ : Tuple = output_stream[0] snake_case__ : Optional[int] = np.frombuffer(A , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def lowercase_ (A : int , A : float , A : str = "f32le" , ): snake_case__ : str = F'''{sampling_rate}''' snake_case__ : Optional[Any] = '1' if format_for_conversion == "s16le": snake_case__ : Union[str, Any] = 2 elif format_for_conversion == "f32le": snake_case__ : Tuple = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) snake_case__ : int = platform.system() if system == "Linux": snake_case__ : Any = 'alsa' snake_case__ : int = 'default' elif system == "Darwin": snake_case__ : Optional[int] = 'avfoundation' snake_case__ : str = ':0' elif system == "Windows": snake_case__ : List[str] = 'dshow' snake_case__ : Tuple = 'default' snake_case__ : Dict = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] snake_case__ : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample snake_case__ : Union[str, Any] = _ffmpeg_stream(A , A ) for item in iterator: yield item def lowercase_ (A : int , A : float , A : Optional[int] = None , A : Optional[Union[Tuple[float, float], float]] = None , A : str = "f32le" , ): if stream_chunk_s is not None: snake_case__ : List[Any] = stream_chunk_s else: snake_case__ : Optional[int] = chunk_length_s snake_case__ : Optional[Any] = ffmpeg_microphone(A , A , format_for_conversion=A ) if format_for_conversion == "s16le": snake_case__ : str = np.intaa snake_case__ : Dict = 2 elif format_for_conversion == "f32le": snake_case__ : Optional[int] = np.floataa snake_case__ : List[str] = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: snake_case__ : Dict = chunk_length_s / 6 snake_case__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(A , (int, float) ): snake_case__ : Optional[int] = [stride_length_s, stride_length_s] snake_case__ : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample snake_case__ : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample snake_case__ : str = datetime.datetime.now() snake_case__ : int = datetime.timedelta(seconds=A ) for item in chunk_bytes_iter(A , A , stride=(stride_left, stride_right) , stream=A ): # Put everything back in numpy scale snake_case__ : int = np.frombuffer(item['raw'] , dtype=A ) snake_case__ : Tuple = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) snake_case__ : Union[str, Any] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 1_0 * delta: # We're late !! SKIP continue yield item def lowercase_ (A : Tuple , A : int , A : Tuple[int, int] , A : bool = False ): snake_case__ : List[str] = b'' snake_case__ , snake_case__ : Tuple = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) snake_case__ : Tuple = 0 for raw in iterator: acc += raw if stream and len(A ) < chunk_len: snake_case__ : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(A ) >= chunk_len: # We are flushing the accumulator snake_case__ : Union[str, Any] = (_stride_left, stride_right) snake_case__ : int = {'raw': acc[:chunk_len], 'stride': stride} if stream: snake_case__ : Tuple = False yield item snake_case__ : List[Any] = stride_left snake_case__ : Optional[Any] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(A ) > stride_left: snake_case__ : List[Any] = {'raw': acc, 'stride': 
(_stride_left, 0)} if stream: snake_case__ : Tuple = False yield item def lowercase_ (A : Optional[int] , A : int ): snake_case__ : Tuple = 2**2_4 # 16Mo try: with subprocess.Popen(A , stdout=subprocess.PIPE , bufsize=A ) as ffmpeg_process: while True: snake_case__ : List[Any] = ffmpeg_process.stdout.read(A ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
243
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` (recursive rotate-and-append).

    `nums` is temporarily mutated but restored to its original order on return.
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # rotate the popped element back to restore `nums`
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` via in-place swap backtracking."""

    def backtrack(start: int) -> None:
        # Fix positions [0, start); permute the suffix by swapping each
        # candidate into `start` and recursing.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # NOTE: the anonymized original called an undefined `permutea` and printed
    # an undefined `res`; restored to the backtracking variant.
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
243
1
"""Integration tests for the ONNX Stable Diffusion inpainting pipeline.

NOTE(review): the anonymized original named both classes identically and used
an undefined bare name ``a`` for several keyword arguments; identifiers below
are reconstructed (assumed ``None`` for safety_checker / feature_extractor /
progress-bar disable — verify against upstream).
"""
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        """CUDA execution provider limited to 15GB of GPU memory."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
25
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model (BERT-style encoder with rotary position embeddings).

    Defaults reproduce the `junnyu/roformer_chinese_base` architecture.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether rotary embeddings are also applied to the value projections.
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes for ONNX export; multiple-choice adds a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
25
1
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode characters.

    Bytes that are already printable map to themselves; the rest are shifted
    above 2**8 so no byte maps to whitespace/control characters the BPE code
    would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (same vocabulary/merges format as BART)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to a single pre-token; result is cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Reassemble sub-tokens and decode the byte-level mapping back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Single sequence: `<s> X </s>`; pair: `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # LED (like BART/RoBERTa) does not use token type ids; return all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        # Byte-level BPE treats a leading space as part of the first word.
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then pad `global_attention_mask` to match the inputs."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
713
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def SCREAMING_SNAKE_CASE ( snake_case_ : dict ): return (data["data"], data["target"]) def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ): snake_case__ : Optional[int] = XGBClassifier() classifier.fit(snake_case_ , snake_case_ ) return classifier def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = load_iris() snake_case__, snake_case__ : str = data_handling(snake_case_ ) snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split( snake_case_ , snake_case_ , test_size=0.25 ) snake_case__ : Dict = iris["target_names"] # Create an XGBoost Classifier from the training data snake_case__ : Dict = xgboost(snake_case_ , snake_case_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
25
0
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = '''ylacombe/bark-small''' UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = '''en_speaker_1''' UpperCamelCase = '''This is a test string''' UpperCamelCase = '''speaker_embeddings_path.json''' UpperCamelCase = '''speaker_embeddings''' def UpperCAmelCase__ ( self , **lowerCamelCase_) -> str: return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_) def UpperCAmelCase__ ( self) -> Any: shutil.rmtree(self.tmpdirname) def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.get_tokenizer() UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_) processor.save_pretrained(self.tmpdirname) UpperCamelCase = BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def UpperCAmelCase__ ( self) -> Dict: UpperCamelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') UpperCamelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def UpperCAmelCase__ ( self) -> int: UpperCamelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , 
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) UpperCamelCase = 3_5 UpperCamelCase = 2 UpperCamelCase = 8 UpperCamelCase = { '''semantic_prompt''': np.ones(lowerCamelCase_), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_) UpperCamelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([])).tolist()) # test loading voice preset from npz file UpperCamelCase = os.path.join(self.tmpdirname , '''file.npz''') np.savez(lowerCamelCase_ , **lowerCamelCase_) UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_) UpperCamelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([])).tolist()) # test loading voice preset from the hub UpperCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset) def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = self.get_tokenizer() UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_) UpperCamelCase = processor(text=self.input_string) UpperCamelCase = tokenizer( self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
34
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a
    single processor producing both text encodings and pixel values."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode `text` with the tokenizer and `images` with the image
        processor, returning a single merged `BatchEncoding`."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            # NOTE(review): the originals for these two flags were unreadable;
            # upstream BridgeTower forces normalize + center crop here — confirm.
            do_normalize=True,
            do_center_crop=True,
            **kwargs,
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs) -> str:
        """Forward to `RobertaTokenizerFast.batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> str:
        """Forward to `RobertaTokenizerFast.decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
529
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__ ( __SCREAMING_SNAKE_CASE ): A__= ['image_processor', 'tokenizer'] A__= 'AutoImageProcessor' A__= 'AutoTokenizer' def __init__( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : List[str] ): """simple docstring""" super().__init__(_lowercase , _lowercase ) UpperCAmelCase__ = self.image_processor def __call__( self : Optional[Any] , _lowercase : Tuple=None , _lowercase : Dict=None , _lowercase : Union[str, Any]=None , **_lowercase : Dict ): """simple docstring""" if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: UpperCAmelCase__ = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) if images is not None: UpperCAmelCase__ = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase ) if text is not None and images is not None: UpperCAmelCase__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase ) def _UpperCAmelCase ( self : Dict , *_lowercase : Optional[Any] , **_lowercase : List[str] ): """simple docstring""" return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def _UpperCAmelCase ( self : List[str] , *_lowercase : Optional[int] , **_lowercase : List[str] ): """simple docstring""" return self.tokenizer.decode(*_lowercase , **_lowercase ) @property def _UpperCAmelCase ( self : int ): """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
719
from __future__ import annotations import math def __UpperCAmelCase ( __A , __A ) -> float: '''simple docstring''' UpperCAmelCase__ = u for i in range(1 , __A ): UpperCAmelCase__ = temp * (u - i) return temp def __UpperCAmelCase ( ) -> None: '''simple docstring''' UpperCAmelCase__ = int(input("enter the numbers of values: " ) ) UpperCAmelCase__ = [] for _ in range(__A ): y.append([] ) for i in range(__A ): for j in range(__A ): y[i].append(__A ) UpperCAmelCase__ = 0 print("enter the values of parameters in a list: " ) UpperCAmelCase__ = list(map(__A , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(__A ): UpperCAmelCase__ = float(input() ) UpperCAmelCase__ = int(input("enter the value to interpolate: " ) ) UpperCAmelCase__ = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , __A ): for j in range(n - i ): UpperCAmelCase__ = y[j + 1][i - 1] - y[j][i - 1] UpperCAmelCase__ = y[0][0] for i in range(1 , __A ): summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A ) print(F"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
277
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a : str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
613
from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[Any] = logging.get_logger(__name__) a : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _lowercase ( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE: int = 'openai-gpt' SCREAMING_SNAKE_CASE: List[str] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , lowerCamelCase__=40_478 , lowerCamelCase__=512 , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1E-5 , lowerCamelCase__=0.0_2 , lowerCamelCase__="cls_index" , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=0.1 , **lowerCamelCase__ , ): lowerCAmelCase_: Union[str, Any] = vocab_size lowerCAmelCase_: List[Any] = n_positions lowerCAmelCase_: Tuple = n_embd lowerCAmelCase_: Optional[int] = n_layer lowerCAmelCase_: Optional[int] = n_head lowerCAmelCase_: int = afn lowerCAmelCase_: str = resid_pdrop lowerCAmelCase_: Optional[int] = embd_pdrop lowerCAmelCase_: Optional[int] = attn_pdrop lowerCAmelCase_: Dict = layer_norm_epsilon lowerCAmelCase_: List[Any] = initializer_range lowerCAmelCase_: Union[str, Any] = summary_type lowerCAmelCase_: Any = summary_use_proj lowerCAmelCase_: Dict = summary_activation lowerCAmelCase_: Dict = summary_first_dropout lowerCAmelCase_: List[Any] = summary_proj_to_labels super().__init__(**lowerCamelCase__ )
613
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
650
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for ESM protein language models, optionally including the
    ESMFold structure-prediction head (`is_folding_model=True`)."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested `EsmFoldConfig` dataclass."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a plain dict (e.g. from JSON) for the nested trunk config.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE: the original checked `sequence_state_dim % sequence_state_dim`
        # (always 0). The intended divisibility is by the head width; the
        # stricter equality checks below already imply it, so this is safe.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the canonical ESM-2 token vocabulary, in id order."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
650
1
"""simple docstring""" import comet # From: unbabel-comet import torch import datasets UpperCAmelCase__ =datasets.logging.get_logger(__name__) UpperCAmelCase__ ="\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" UpperCAmelCase__ ="\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" UpperCAmelCase__ ="\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """sources""": datasets.Value("""string""" , id="""sequence""" ), """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[ """https://github.com/Unbabel/COMET""", """https://www.aclweb.org/anthology/2020.emnlp-main.213/""", """http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""", ] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Tuple ): '''simple docstring''' if self.config_name == "default": __lowercase = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) ) else: __lowercase = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def SCREAMING_SNAKE_CASE_ ( 
self : Optional[int] , A_ : List[str] , A_ : List[Any] , A_ : int , A_ : Optional[Any]=None , A_ : Dict=False ): '''simple docstring''' if gpus is None: __lowercase = 1 if torch.cuda.is_available() else 0 __lowercase = {"""src""": sources, """mt""": predictions, """ref""": references} __lowercase = [dict(zip(A_ , A_ ) ) for t in zip(*data.values() )] __lowercase , __lowercase = self.scorer.predict(A_ , gpus=A_ , progress_bar=A_ ) return {"mean_score": mean_score, "scores": scores}
616
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") UpperCAmelCase__ ={"target_lang": "fi", "source_lang": "en"} UpperCAmelCase__ =">>zh<<" UpperCAmelCase__ ="Helsinki-NLP/" if is_torch_available(): UpperCAmelCase__ ="pt" elif is_tf_available(): UpperCAmelCase__ ="tf" else: UpperCAmelCase__ ="jax" @require_sentencepiece class lowerCamelCase__ ( _a , unittest.TestCase ): a : Dict = MarianTokenizer a : Optional[int] = False a : Any = True def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' super().setUp() __lowercase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] __lowercase = dict(zip(A_ , range(len(A_ ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(A_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) __lowercase = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : Dict , **A_ : Any ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **A_ ) def SCREAMING_SNAKE_CASE_ ( self : int , A_ : List[str] ): '''simple docstring''' return ( "This is a test", "This is a test", ) def SCREAMING_SNAKE_CASE_ ( 
self : Any ): '''simple docstring''' __lowercase = """</s>""" __lowercase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(A_ ) , 9 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' __lowercase = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) __lowercase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=A_ ) self.assertIsInstance(A_ , A_ ) __lowercase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0] self.assertListEqual(A_ , batch.input_ids[0] ) __lowercase = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A_ ) __lowercase = [x.name for x in Path(A_ ).glob("""*""" )] self.assertIn("""source.spm""" , A_ ) MarianTokenizer.from_pretrained(A_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' __lowercase = self.get_tokenizer() __lowercase = tok( ["""I am a small frog""" * 1_0_0_0, """I am a small frog"""] , padding=A_ , truncation=A_ , return_tensors=A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2) ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' __lowercase = self.get_tokenizer() __lowercase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=A_ , return_tensors=A_ ) self.assertIsInstance(A_ , A_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' __lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 
4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 
5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): 
'''simple docstring''' __lowercase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) __lowercase = """Tämä on testi""" __lowercase = """This is a test""" __lowercase = [7_6, 7, 2_0_4_7, 2] __lowercase = [6_9, 1_2, 1_1, 9_4_0, 2] __lowercase = tokenizer(A_ ).input_ids self.assertListEqual(A_ , A_ ) __lowercase = tokenizer(text_target=A_ ).input_ids self.assertListEqual(A_ , A_ ) __lowercase = tokenizer.decode(A_ , skip_special_tokens=A_ ) self.assertEqual(A_ , A_ )
616
1
def __UpperCamelCase(snake_case=1000) -> int:
    """Return the index of the first Fibonacci term with `snake_case` digits.

    Project Euler problem 25: with F(1) = F(2) = 1, find the index of the
    first term in the Fibonacci sequence to contain `snake_case` decimal
    digits (4782 for the default of 1000 digits).

    Args:
        snake_case: required number of decimal digits (default 1000).

    Returns:
        The 1-based index of the first qualifying Fibonacci term.

    Raises:
        ValueError: if `snake_case` is less than 1.
    """
    if snake_case < 1:
        raise ValueError("number of digits must be >= 1")
    if snake_case == 1:
        # F(1) = 1 already has one digit; the loop below only inspects
        # terms from index 3 onwards, so answer this case directly.
        return 1
    fa, fb = 1, 1  # F(1), F(2)
    index = 2
    while True:
        # Advance to the next Fibonacci term (arbitrary-precision ints).
        fa, fb = fb, fa + fb
        index += 1
        # Bug fix: the digit count must come from the Fibonacci term itself;
        # the original counted the digits of the *input* `snake_case`.
        if len(str(fb)) >= snake_case:
            return index


# Public alias: the __main__ guard below (and external callers) use
# `solution`, which the obfuscated original left undefined.
solution = __UpperCamelCase


if __name__ == "__main__":
    print(solution(int(input().strip())))
341
class _lowerCAmelCase:
    """Prefix-sum index over a numeric array.

    Precomputes cumulative sums so that inclusive range-sum queries answer
    in O(1) and "is there a contiguous subarray summing to X" in O(n).

    NOTE(fix): the obfuscated original declared both query methods under one
    identical name (the second shadowed the first) and gave `get_sum` two
    parameters with the same name — a SyntaxError. The intended identifiers
    (`get_sum(start, end)`, `contains_sum(target_sum)`) are restored from
    how the method bodies use them.
    """

    def __init__(self, array) -> None:
        """Build the prefix-sum table for ``array`` in O(n)."""
        len_array = len(array)
        # prefix_sum[i] == array[0] + ... + array[i]
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of array[start..end] (both inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some non-empty contiguous subarray sums to ``target_sum``.

        Classic prefix-sum + hash-set scan: a subarray ending at j sums to
        the target iff prefix_sum[j] - target_sum equals an earlier prefix
        sum (0 stands in for the empty prefix).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


# Public, descriptive alias for the mangled class name (backward compatible).
PrefixSum = _lowerCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1