| code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(R"^\s*try:")
# Catches a line with else:
_re_else = re.compile(R"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
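
# Note: folders are collected at every depth (e.g. both "models" and
# "models.bert"), while plain .py files are only collected when they sit
# directly under src/transformers (the len(submodule.split(".")) == 1 check).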
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """
    Check all submodules of Transformers are registered in the main init.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a pytorch BertModel to a TensorFlow checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
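
# --- Illustrative sketch (hypothetical, for exposition only) ---
# Shows the effect of the renaming patterns above on one made-up PyTorch
# parameter name; only the patterns that actually fire on it are replayed here.
def _demo_var_name_mapping():
    name = "encoder.layer.0.attention.self.query.weight"
    for patt, repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl)
    assert f"bert/{name}" == "bert/encoder/layer_0/attention/self/query/kernel"
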
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
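
# --- Illustrative example (hypothetical, for exposition only) ---
# Shard filenames are assumed to end in "-<shard>-<num_samples>.tfrecord", so
# two made-up 512-sample shards count as 1024 samples in total.
def _demo_count_samples():
    files = ["gs://bucket/train-00000-00512.tfrecord", "gs://bucket/train-00001-00512.tfrecord"]
    assert count_samples(files) == 1024
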
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch
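    # Note: the mask built in mask_with_collator flags padding positions
    # (attention_mask == 0) plus CLS and SEP, so tf_mask_tokens never selects
    # those positions as MLM targets.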
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks)

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__lowerCamelCase : Tuple = parse_args()
main(args)
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 653 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
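
# --- Illustrative sketch (hypothetical, for exposition only) ---
# Stochastic depth drops whole samples and rescales the survivors by
# 1 / keep_prob so the expected activation is unchanged; a quick check:
def _demo_drop_path():
    x = torch.ones(4, 3, 2, 2)
    out = drop_path(x, drop_prob=0.5, training=True)
    # every sample is either all zeros or all 1 / (1 - 0.5) == 2.0
    per_sample_max = out.flatten(1).amax(dim=1)
    assert all(v in (0.0, 2.0) for v in per_sample_max.tolist())
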
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group.
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
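
# Note: returning pool(x) - x makes average pooling act as the token mixer;
# the residual connection in PoolFormerLayer adds the input back in.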
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = R"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket. If the bucket is empty or holds the same key, insert and return True;
        otherwise the caller needs to probe the next bucket.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
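
# --- Illustrative usage (hypothetical, for exposition only) ---
def _demo_hash_map():
    hm = HashMap()
    hm["one"], hm["two"] = 1, 2
    assert hm["one"] == 1 and len(hm) == 2
    del hm["one"]
    assert len(hm) == 1 and "two" in hm
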
| 653 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
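
# --- Illustrative example (hypothetical, for exposition only) ---
def _demo_camel_case_split():
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]
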
def get_frameworks_table():
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """
    Update the table of model-class-to-(pipeline tag, auto class) without removing keys that no longer exist.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
__lowerCamelCase : Union[str, Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_bigcode'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : str = activation_function
snake_case_ : Union[str, Any] = resid_pdrop
snake_case_ : Optional[Any] = embd_pdrop
snake_case_ : Any = attn_pdrop
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : int = scale_attn_weights
snake_case_ : Union[str, Any] = use_cache
snake_case_ : Dict = attention_softmax_in_fpaa
snake_case_ : Any = scale_attention_softmax_in_fpaa
snake_case_ : List[str] = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Any = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
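# Hedged usage note (our addition): upstream this class is `GPTBigCodeConfig`.
# The attribute_map above lets callers read the config through the standard
# names, so, assuming the upstream class name:
#
#     config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
#     config.hidden_size  # -> 768, resolved through attribute_map to n_embd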
| 653 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = [[1, 2, 4], [1, 2, 3, 4]]
snake_case_ : str = DisjunctiveConstraint(lowerCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__ ) )
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__ ):
DisjunctiveConstraint(lowerCAmelCase__ ) # fails here
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : str = [[1, 2, 3], [1, 2, 4]]
snake_case_ : Optional[int] = DisjunctiveConstraint(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_ : int = dc.update(1 )
snake_case_ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case_, snake_case_, snake_case_ : Optional[int] = dc.update(2 )
snake_case_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case_, snake_case_, snake_case_ : Optional[Any] = dc.update(3 )
snake_case_ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case_ : Tuple = DisjunctiveConstraint(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_ : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case_, snake_case_, snake_case_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case_, snake_case_, snake_case_ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case_, snake_case_, snake_case_ : Optional[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case_, snake_case_, snake_case_ : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case_, snake_case_, snake_case_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case_, snake_case_, snake_case_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
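# Hedged note (our addition): as the tests above exercise, `DisjunctiveConstraint`
# requires generation to realize at least one of several candidate token sequences.
# `update(token_id)` returns a (stepped, completed, reset) triple: `stepped` means
# the token advanced some candidate, `completed` flips to True once any full
# candidate is matched, and `reset` signals the partial match was abandoned
# because the token fit no candidate.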
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
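# Hedged worked example (our addition, `_rank_math_sketch` is our name): a quick
# sanity check of the rank arithmetic used above. With 2 nodes x 4 GPUs per node,
# WORLD_SIZE is 8 and the process with global RANK 5 lives on node 1; its
# local_rank comes from the launcher, not from this division.
def _rank_math_sketch():
    world_size, n_gpu_per_node, global_rank = 8, 4, 5
    assert world_size // n_gpu_per_node == 2  # n_nodes
    assert global_rank // n_gpu_per_node == 1  # node_id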
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Dict = 0
if start < end:
snake_case_ : Dict = randint(__magic_name__ ,__magic_name__ )
snake_case_ : int = a[end]
snake_case_ : str = a[pivot]
snake_case_ : Optional[int] = temp
snake_case_, snake_case_ : List[str] = _in_place_partition(__magic_name__ ,__magic_name__ ,__magic_name__ )
count += _in_place_quick_sort(__magic_name__ ,__magic_name__ ,p - 1 )
count += _in_place_quick_sort(__magic_name__ ,p + 1 ,__magic_name__ )
return count
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[Any] = randint(__magic_name__ ,__magic_name__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Optional[int] = a[pivot]
snake_case_ : Dict = temp
snake_case_ : str = start - 1
for index in range(__magic_name__ ,__magic_name__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Tuple = new_pivot_index + 1
snake_case_ : List[Any] = a[new_pivot_index]
snake_case_ : Tuple = a[index]
snake_case_ : List[Any] = temp
snake_case_ : List[str] = a[new_pivot_index + 1]
snake_case_ : List[str] = a[end]
snake_case_ : List[Any] = temp
return new_pivot_index + 1, count
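# Hedged reference sketch (our addition, the helper name `_partition_sketch` is
# ours): a minimal, deterministic Lomuto partition showing the invariant the
# randomized version above relies on -- after the call every element left of the
# returned index is smaller than the pivot, and the pivot sits at that index.
def _partition_sketch(a, start, end):
    boundary = start - 1  # last index known to hold a value smaller than a[end]
    for index in range(start, end):
        if a[index] < a[end]:
            boundary += 1
            a[boundary], a[index] = a[index], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]
    return boundary + 1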
__lowerCamelCase : Optional[int] = TemporaryFile()
__lowerCamelCase : Any = 100 # 100 elements are to be sorted
__lowerCamelCase , __lowerCamelCase : Dict = 0, 1 # mean and standard deviation
__lowerCamelCase : Optional[int] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
__lowerCamelCase : Any = np.load(outfile)
__lowerCamelCase : Optional[int] = len(M) - 1
__lowerCamelCase : str = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is :'''
)
print(z)
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str:
'''simple docstring'''
if not batched:
snake_case_ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : int = image.size
else:
snake_case_, snake_case_ : Any = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : List[Any] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Tuple = self.size["shortest_edge"]
snake_case_ : Dict = self.size["shortest_edge"]
else:
snake_case_ : List[str] = []
for image in image_inputs:
snake_case_, snake_case_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
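# Hedged worked example (our addition): with size {"shortest_edge": 18} and a
# 30 x 400 (w x h) input, w < h, so the width is set to 18 and the height scales
# to int(18 * 400 / 30) = 240, preserving the aspect ratio.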
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = YolosImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : int = YolosImageProcessingTester(self )
@property
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Check that calling the "pad" method and calling the image processor directly return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 653 | 1 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__lowerCamelCase : Any = get_logger(__name__)
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any]=None ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Any = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module
class A_ :
"""simple docstring"""
a__ = []
def __init__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str=None ) -> List[str]:
'''simple docstring'''
snake_case_ : str = obj
snake_case_ : Union[str, Any] = target
snake_case_ : Union[str, Any] = new
snake_case_ : int = target.split("." )[0]
snake_case_ : Tuple = {}
snake_case_ : int = attrs or []
def __enter__( self :List[str] ) -> List[Any]:
'''simple docstring'''
*snake_case_, snake_case_ : List[Any] = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
snake_case_ : List[str] = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
snake_case_ : Any = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
snake_case_ : Dict = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
snake_case_ : List[str] = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
snake_case_ : str = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
snake_case_ : Tuple = getattr(import_module(".".join(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase__ ) is attr_value:
snake_case_ : List[str] = getattr(self.obj , lowerCAmelCase__ )
setattr(self.obj , lowerCAmelCase__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
snake_case_ : str = globals()["__builtins__"][target_attr]
setattr(self.obj , lowerCAmelCase__ , self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self :Tuple , *lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
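# Hedged usage sketch (our addition): upstream the patcher class above is
# `datasets.utils.patching.patch_submodule`. The demo below is ours and uses a
# throwaway module so nothing real is touched; inside the context, code reading
# `demo.os.path.join` sees the replacement, and __exit__ restores the original.
#
#     import os, types
#     demo = types.ModuleType("demo")
#     demo.os = os  # as if the demo module had done `import os`
#     def fake_join(*parts):
#         return "/".join(parts)
#     with patch_submodule(demo, "os.path.join", fake_join):
#         assert demo.os.path.join("a", "b") == "a/b"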
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
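# Hedged reference sketch (our addition, `_fizz_buzz_sketch` is our name): a
# compact equivalent of the loop above, handy for eyeballing the output format --
# entries are space separated with a trailing space, and multiples of 15 emit
# the concatenation "FizzBuzz".
def _fizz_buzz_sketch(number: int, iterations: int) -> str:
    out = ""
    for n in range(number, iterations + 1):
        word = ("Fizz" if n % 3 == 0 else "") + ("Buzz" if n % 5 == 0 else "")
        out += (word or str(n)) + " "
    return out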
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__lowerCamelCase : Tuple = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : str = load_dataset("glue" ,"mrpc" )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Any = datasets.map(
__magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case_ : str = 8
else:
snake_case_ : Optional[Any] = None
return tokenizer.pad(
__magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)
# Instantiate dataloaders.
snake_case_ : str = DataLoader(
tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
snake_case_ : Optional[Any] = DataLoader(
tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
snake_case_ : List[str] = 2
# Initialize accelerator
snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : List[str] = config["lr"]
snake_case_ : Dict = int(config["num_epochs"] )
snake_case_ : Dict = int(config["seed"] )
snake_case_ : Optional[int] = int(config["batch_size"] )
snake_case_ : Dict = evaluate.load("glue" ,"mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__magic_name__ )
def inner_training_loop(__magic_name__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__magic_name__ )
# Instantiate the model (we build the model here so that the seed also controls the initialization of new weights)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ )
snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ )
# Instantiate scheduler
snake_case_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : int = model(**__magic_name__ )
snake_case_ : Any = outputs.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**__magic_name__ )
snake_case_ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__magic_name__ ,references=__magic_name__ ,)
snake_case_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
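# Hedged minimal sketch (our addition) of the decorator pattern used above: on a
# CUDA out-of-memory error, `find_executable_batch_size` frees memory, halves the
# injected `batch_size`, and re-runs the wrapped function until it fits.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#     train()  # called with no arguments; the decorator supplies batch_size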
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
snake_case_ : str = parser.parse_args()
snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCamelCase : Any = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCamelCase : List[str] = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowerCamelCase : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def __UpperCAmelCase ( __magic_name__ )-> dict[Any, Any]:
"""simple docstring"""
snake_case_ : List[str] = {
"Authorization": F'''token {auth_token}''',
"Accept": "application/vnd.github.v3+json",
}
return requests.get(__magic_name__ ,headers=__magic_name__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
"""simple docstring"""
a__ = '''facebook/bart-large-mnli'''
a__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
a__ = '''text_classifier'''
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ['''text''', ['''text''']]
a__ = ['''text''']
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().setup()
snake_case_ : Optional[int] = self.model.config
snake_case_ : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
snake_case_ : Union[str, Any] = int(lowerCAmelCase__ )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Tuple = labels
return self.pre_processor(
[text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = outputs.logits
snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
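# Hedged usage sketch (our addition): upstream this tool is
# `transformers.tools.TextClassificationTool`; calling it runs setup (downloading
# the MNLI checkpoint) on first use and returns the single most likely label.
#
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])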
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : str = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = ['''ConditionalDetrFeatureExtractor''']
__lowerCamelCase : Union[str, Any] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
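# Hedged note (our addition): upstream, the `_LazyModule` instance built above is
# assigned to `sys.modules[__name__]`, replacing this module with a proxy that
# imports a symbol's submodule only on first attribute access, while the
# `TYPE_CHECKING` branch keeps static type checkers aware of the full API without
# paying the import cost at runtime.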
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ViTFeatureExtractor''']
__lowerCamelCase : Any = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__lowerCamelCase : Union[str, Any] = 500000
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = os.path.split(__file__)
__lowerCamelCase : Union[str, Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
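# Hedged sketch (our addition): `get_duration`, imported from the local benchmark
# utils, is a timing decorator; functionally it behaves like the following,
# returning the wall-clock seconds the wrapped call took rather than its result.
#
#     import functools, time
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper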
@get_duration
def __UpperCAmelCase ( __magic_name__ ,**__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : str = dataset.map(**__magic_name__ )
@get_duration
def __UpperCAmelCase ( __magic_name__ ,**__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = dataset.filter(**__magic_name__ )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : List[str] = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
snake_case_ : int = generate_example_dataset(
os.path.join(__magic_name__ ,"dataset.arrow" ) ,__magic_name__ ,num_examples=__magic_name__ )
snake_case_ : Tuple = transformers.AutoTokenizer.from_pretrained("bert-base-cased" ,use_fast=__magic_name__ )
def tokenize(__magic_name__ ):
return tokenizer(examples["text"] )
snake_case_ : List[str] = map(__magic_name__ )
snake_case_ : Tuple = map(__magic_name__ ,batched=__magic_name__ )
snake_case_ : Union[str, Any] = map(__magic_name__ ,function=lambda __magic_name__ : None ,batched=__magic_name__ )
with dataset.formatted_as(type="numpy" ):
snake_case_ : str = map(__magic_name__ ,function=lambda __magic_name__ : None ,batched=__magic_name__ )
with dataset.formatted_as(type="pandas" ):
snake_case_ : int = map(__magic_name__ ,function=lambda __magic_name__ : None ,batched=__magic_name__ )
with dataset.formatted_as(type="torch" ,columns="numbers" ):
snake_case_ : Union[str, Any] = map(__magic_name__ ,function=lambda __magic_name__ : None ,batched=__magic_name__ )
with dataset.formatted_as(type="tensorflow" ,columns="numbers" ):
snake_case_ : str = map(__magic_name__ ,function=lambda __magic_name__ : None ,batched=__magic_name__ )
snake_case_ : Dict = map(__magic_name__ ,function=__magic_name__ ,batched=__magic_name__ )
snake_case_ : List[str] = filter(__magic_name__ )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__magic_name__ ,"wb" ) as f:
f.write(json.dumps(__magic_name__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
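# (Our clarifying note) After the swaps above, every box satisfies
# x0 <= x1 and y0 <= y1, i.e. a valid (left, top, right, bottom) ordering.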
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = snake_case_
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
self.model_tester = LayoutLMvaModelTester(self )
self.config_tester = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
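# Expand each multi-dimensional tensor input along a new num_choices dimension (used by multiple-choice heads).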
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
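# Create dummy labels whose shape matches each task head: one label per example, start/end positions for QA, or one label per text token.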
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LayoutLMvaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( )-> List[str]:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
input_ids = torch.tensor([[1, 2]] )
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
outputs = model(
input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
# verify the last hidden states
expected_shape = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
expected_slice = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
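# Each entry maps a model type to its config class, TensorFlow model class(es), PyTorch model class(es) and pretrained checkpoint/config map(s).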
__lowerCamelCase : Optional[int] = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type ,pytorch_checkpoint_path ,config_file ,tf_dump_path ,compare_with_pt_model=False ,use_cached_models=True )-> Optional[int]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
config_file = cached_file(config_file ,CONFIG_NAME ,force_download=not use_cached_models )
config = config_class.from_json_file(config_file )
config.output_hidden_states = True
config.output_attentions = True
print(F'''Building TensorFlow model from configuration: {config}''' )
tf_model = model_class(config )
# Resolve the PyTorch checkpoint path (shortcut name or local file)
if pytorch_checkpoint_path in aws_config_map.keys():
pytorch_checkpoint_path = cached_file(
pytorch_checkpoint_path ,WEIGHTS_NAME ,force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model ,pytorch_checkpoint_path )
if compare_with_pt_model:
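# Sanity check: run the TF and PyTorch models on the same dummy inputs and compare their first outputs.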
tfo = tf_model(tf_model.dummy_inputs ,training=False ) # build the network
state_dict = torch.load(pytorch_checkpoint_path ,map_location="cpu" )
pt_model = pt_model_class.from_pretrained(
pretrained_model_name_or_path=None ,config=config ,state_dict=state_dict )
with torch.no_grad():
pto = pt_model(**pt_model.dummy_inputs )
np_pt = pto[0].numpy()
np_tf = tfo[0].numpy()
diff = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(tf_dump_path ,save_format="h5" )
def convert_all_pt_checkpoints_to_tf( args_model_type ,tf_dump_path ,model_shortcut_names_or_path=None ,config_shortcut_names_or_path=None ,compare_with_pt_model=False ,use_cached_models=False ,remove_cached_files=False ,only_convert_finetuned_models=False ,)-> List[str]:
"""simple docstring"""
if args_model_type is None:
model_types = list(MODEL_CLASSES.keys() )
else:
model_types = [args_model_type]
for j, model_type in enumerate(model_types ,start=1 ):
print("=" * 100 )
print(F''' Converting model type {j}/{len(model_types )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
model_shortcut_names_or_path = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
config_shortcut_names_or_path = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(model_shortcut_names_or_path ,config_shortcut_names_or_path ) ,start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
model_type = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
config_file = cached_file(config_shortcut_name ,CONFIG_NAME ,force_download=not use_cached_models )
else:
config_file = config_shortcut_name
if model_shortcut_name in aws_model_maps:
model_file = cached_file(model_shortcut_name ,WEIGHTS_NAME ,force_download=not use_cached_models )
else:
model_file = model_shortcut_name
if os.path.isfile(model_shortcut_name ):
model_shortcut_name = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=model_type ,pytorch_checkpoint_path=model_file ,config_file=config_file ,tf_dump_path=os.path.join(tf_dump_path ,model_shortcut_name + "-tf_model.h5" ) ,compare_with_pt_model=compare_with_pt_model ,)
if remove_cached_files:
os.remove(config_file )
os.remove(model_file )
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given, and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name, '''
'''the configuration associated with the shortcut name on AWS is used.'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
__lowerCamelCase : Optional[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
lst = [1, 2, 3]
with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(add_one ,lst ,num_proc=2 )
with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(add_one ,lst ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( num_proc )-> List[Any]:
"""simple docstring"""
s1 = [1, 2]
s2 = {"a": 1, "b": 2}
s3 = {"a": [1, 2], "b": [3, 4]}
s4 = {"a": {"1": 1}, "b": 2}
s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
expected_map_nested_s1 = [2, 3]
expected_map_nested_s2 = {"a": 2, "b": 3}
expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(add_one ,s1 ,num_proc=num_proc ) == expected_map_nested_s1
assert map_nested(add_one ,s2 ,num_proc=num_proc ) == expected_map_nested_s2
assert map_nested(add_one ,s3 ,num_proc=num_proc ) == expected_map_nested_s3
assert map_nested(add_one ,s4 ,num_proc=num_proc ) == expected_map_nested_s4
assert map_nested(add_one ,s5 ,num_proc=num_proc ) == expected_map_nested_s5
| 653 | 1 |
'''simple docstring'''
def combination_sum_iv( n ,array ,target )-> int:
"""simple docstring"""
def count_of_possible_combinations(target ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n ,array ,target )-> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
target ,dp_array ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
answer = sum(
count_of_possible_combinations_with_dp_array(target - item ,dp_array )
for item in array )
dp_array[target] = answer
return answer
dp_array = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(target ,dp_array )
def combination_sum_iv_bottom_up( n ,array ,target )-> int:
"""simple docstring"""
dp_array = [0] * (target + 1)
dp_array[0] = 1
for i in range(1 ,target + 1 ):
for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''esm'''
def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : str = emb_layer_norm_before
snake_case_ : List[Any] = token_dropout
snake_case_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
snake_case_ : Optional[Any] = EsmFoldConfig()
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
snake_case_ : List[str] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : List[Any] = None
snake_case_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase__ ):
snake_case_ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = None
a__ = True
a__ = False
a__ = False
a__ = False
a__ = 0
a__ = True
a__ = False
a__ = 128
a__ = None
def _A ( self :Dict ) -> int:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase__ ):
snake_case_ : int = TrunkConfig(**self.trunk )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = asdict(self )
snake_case_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 48
a__ = 1024
a__ = 128
a__ = 32
a__ = 32
a__ = 32
a__ = 0
a__ = 0
a__ = False
a__ = 4
a__ = 128
a__ = None
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase__ ):
snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : Dict = self.structure_module.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 384
a__ = 128
a__ = 16
a__ = 128
a__ = 12
a__ = 4
a__ = 8
a__ = 0.1
a__ = 8
a__ = 1
a__ = 2
a__ = 7
a__ = 10
a__ = 1E-8
a__ = 1E5
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return asdict(self )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 653 | 1 |
'''simple docstring'''
def merge_sort( collection )-> list:
"""simple docstring"""
def merge(left ,right ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
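# Note: list.pop(0) above is O(n) per call; index pointers or collections.deque would make each merge linear.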
if len(collection ) <= 1:
return collection
mid = len(collection ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
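# Backend-specific objects are appended below only when the corresponding dependency (tokenizers, torch, TF) is importable.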
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class A_ (a_ ):
"""simple docstring"""
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
self.test_model = SMALL_MODEL_IDENTIFIER
self.framework_pt = "pt"
self.framework_tf = "tf"
def _A ( self :List[str] , lowerCAmelCase__ :Dict ) -> Optional[int]:
'''simple docstring'''
model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :int ) -> Optional[int]:
'''simple docstring'''
model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCAmelCase__ )
model_tf.save_pretrained(lowerCAmelCase__ )
def _A ( self :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = "mock_framework"
# Framework provided - return whatever the user provides
snake_case_ : Any = FeaturesManager.determine_framework(self.test_model , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase__ )
snake_case_ : Any = FeaturesManager.determine_framework(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase__ )
snake_case_ : Optional[int] = FeaturesManager.determine_framework(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :str ) -> Tuple:
'''simple docstring'''
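# PyTorch checkpoint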
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCAmelCase__ )
snake_case_ : int = FeaturesManager.determine_framework(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCAmelCase__ )
snake_case_ : List[Any] = FeaturesManager.determine_framework(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = FeaturesManager.determine_framework(lowerCAmelCase__ )
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
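# TensorFlow not in environment -> use PyTorch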
snake_case_ : str = MagicMock(return_value=lowerCAmelCase__ )
with patch("transformers.onnx.features.is_tf_available" , lowerCAmelCase__ ):
snake_case_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
snake_case_ : Optional[Any] = MagicMock(return_value=lowerCAmelCase__ )
with patch("transformers.onnx.features.is_torch_available" , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase__ , self.framework_tf )
# Both in environment -> use PyTorch
snake_case_ : List[str] = MagicMock(return_value=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = MagicMock(return_value=lowerCAmelCase__ )
with patch("transformers.onnx.features.is_tf_available" , lowerCAmelCase__ ), patch(
"transformers.onnx.features.is_torch_available" , lowerCAmelCase__ ):
snake_case_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCAmelCase__ , self.framework_pt )
# Both not in environment -> raise error
snake_case_ : Optional[int] = MagicMock(return_value=lowerCAmelCase__ )
snake_case_ : Any = MagicMock(return_value=lowerCAmelCase__ )
with patch("transformers.onnx.features.is_tf_available" , lowerCAmelCase__ ), patch(
"transformers.onnx.features.is_torch_available" , lowerCAmelCase__ ):
with self.assertRaises(lowerCAmelCase__ ):
snake_case_ : str = FeaturesManager.determine_framework(self.test_model )
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = d_model
snake_case_ : Dict = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[Any] = prediction_length
snake_case_ : str = context_length
snake_case_ : Tuple = cardinality
snake_case_ : List[str] = num_time_features
snake_case_ : Optional[Any] = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : Optional[Any] = is_training
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = context_length
snake_case_ : Any = prediction_length + label_length
snake_case_ : Union[str, Any] = label_length
snake_case_ : List[Any] = moving_average
snake_case_ : str = autocorrelation_factor
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = config.context_length + max(config.lags_sequence )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : int = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_config()
snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
model = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
outputs = model(**lowerCAmelCase__ )
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
encoder = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
transformer_inputs, feature, _, _, _ = model.create_network_inputs(**lowerCAmelCase__ )
seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
enc_input = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
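# Rebuild the decoder inputs by hand: the seasonal part is padded with zeros, the trend part with the context mean, and both get the label-window time features appended.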
mean = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
zeros = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
dec_input = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
trend_init = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
decoder = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
last_hidden_state_a = decoder(
trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Dict ) -> int:
'''simple docstring'''
self.model_tester = AutoformerModelTester(self )
self.config_tester = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
model, info = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _A ( self :str ) -> str:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Dict = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
def _A ( self :int ) -> Any:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
snake_case_ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : Any = True
snake_case_ : Any = False
snake_case_ : Dict = True
snake_case_ : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Optional[int] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : str = outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
out_len = len(lowerCAmelCase__ )
correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(out_len , correct_outlen )
# decoder attentions
snake_case_ : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = True
snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
"""simple docstring"""
file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=filename ,repo_type="dataset" )
batch = torch.load(file ,map_location=torch_device )
return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Any:
'''simple docstring'''
model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch()
with torch.no_grad():
output = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
expected_shape = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=torch_device )
self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def _A ( self :Any ) -> str:
'''simple docstring'''
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch("val-batch.pt" )
with torch.no_grad():
output = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=torch_device )
self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch("val-batch.pt" )
with torch.no_grad():
outputs = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , expected_shape )
expected_slice = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=torch_device )
mean_prediction = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
import math
def is_prime( number )-> bool:
"""simple docstring"""
assert isinstance(number ,int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
odd_numbers = range(3 ,int(math.sqrt(number ) + 1 ) ,2 )
return not any(not number % i for i in odd_numbers )
def next_prime( value ,factor=1 ,**kwargs )-> int:
"""simple docstring"""
value = factor * value
first_value_val = value
while not is_prime(value ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 ,**kwargs )
return value
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = "A, <mask> AllenNLP sentence."
snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _A ( self :int ) -> Tuple:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
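# --- Hedged worked example of the offset-mapping matrix exercised above
# (spans are derived from the assertions, not run against a real tokenizer).
# For text = "hello hello", trim_offsets decides whether the leading "Ġ"
# space byte is counted inside the second token's character span:
#   add_prefix_space=True,  trim_offsets=True  -> spans (0, 5) and (6, 11)
#   add_prefix_space=False, trim_offsets=False -> spans (0, 5) and (5, 11)
_tok = "hello"
assert (0, len(_tok)) == (0, 5)
assert (len(_tok) + 1, len(_tok) + 1 + len(_tok)) == (6, 11)  # trimmed
assert (len(_tok), len(_tok) + 1 + len(_tok)) == (5, 11)  # untrimmed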
| 653 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A_ (a_ ):
"""simple docstring"""
a__ = ['''vqvae''']
def __init__( self :Optional[int] , lowerCAmelCase__ :AutoencoderKL , lowerCAmelCase__ :UNetaDConditionModel , lowerCAmelCase__ :Mel , lowerCAmelCase__ :Union[DDIMScheduler, DDPMScheduler] , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , mel=lowerCAmelCase__ , vqvae=lowerCAmelCase__ )
def _A ( self :int ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , lowerCAmelCase__ ) else 1_000
@torch.no_grad()
def __call__( self :List[Any] , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :str = None , lowerCAmelCase__ :np.ndarray = None , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :int = None , lowerCAmelCase__ :torch.Generator = None , lowerCAmelCase__ :float = 0 , lowerCAmelCase__ :float = 0 , lowerCAmelCase__ :torch.Generator = None , lowerCAmelCase__ :float = 0 , lowerCAmelCase__ :torch.Tensor = None , lowerCAmelCase__ :torch.Tensor = None , lowerCAmelCase__ :Any=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
snake_case_ : Optional[int] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__ )
snake_case_ : Tuple = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
snake_case_ : int = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case_ : Any = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
snake_case_ : Optional[int] = noise
snake_case_ : List[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = self.mel.audio_slice_to_image(lowerCAmelCase__ )
snake_case_ : List[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
snake_case_ : int = (input_image / 255) * 2 - 1
snake_case_ : str = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
snake_case_ : Optional[Any] = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase__ )[0]
snake_case_ : Optional[int] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case_ : int = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
snake_case_ : Any = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case_ : Optional[int] = int(mask_start_secs * pixels_per_second )
snake_case_ : int = int(mask_end_secs * pixels_per_second )
snake_case_ : List[str] = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowerCAmelCase__ ):
snake_case_ : Dict = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
else:
snake_case_ : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
if isinstance(self.scheduler , lowerCAmelCase__ ):
snake_case_ : List[str] = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["prev_sample"]
else:
snake_case_ : Optional[int] = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["prev_sample"]
if mask is not None:
if mask_start > 0:
snake_case_ : int = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case_ : int = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case_ : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
snake_case_ : List[str] = self.vqvae.decode(lowerCAmelCase__ )["sample"]
snake_case_ : List[str] = (images / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
snake_case_ : int = (images * 255).round().astype("uint8" )
snake_case_ : Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode="RGB" ).convert("L" ) for _ in images) )
snake_case_ : str = [self.mel.image_to_audio(lowerCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase__ ) )
@torch.no_grad()
def _A ( self :List[str] , lowerCAmelCase__ :List[Image.Image] , lowerCAmelCase__ :int = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowerCAmelCase__ )
self.scheduler.set_timesteps(lowerCAmelCase__ )
snake_case_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
snake_case_ : Optional[Any] = (sample / 255) * 2 - 1
snake_case_ : Union[str, Any] = torch.Tensor(lowerCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
snake_case_ : Any = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case_ : Any = self.scheduler.alphas_cumprod[t]
snake_case_ : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case_ : Any = 1 - alpha_prod_t
snake_case_ : int = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
snake_case_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case_ : int = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case_ : List[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _A ( lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :float ) -> torch.Tensor:
'''simple docstring'''
snake_case_ : Any = acos(torch.dot(torch.flatten(lowerCAmelCase__ ) , torch.flatten(lowerCAmelCase__ ) ) / torch.norm(lowerCAmelCase__ ) / torch.norm(lowerCAmelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowerCAmelCase__ ) + sin(alpha * theta ) * xa / sin(lowerCAmelCase__ )
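# --- Hedged sketch of the spherical interpolation (slerp) implemented above.
# Assumption: the obfuscation collapsed the two endpoints into a single name;
# upstream they are two noise tensors x0 and x1 blended at ratio alpha along
# the great circle between them. Names below are illustrative only.
def _slerp_sketch(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the flattened tensors, then sin-weighted blend
    theta = acos(float(torch.dot(x0.flatten(), x1.flatten()) / (x0.norm() * x1.norm())))
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

_x0, _x1 = torch.randn(4), torch.randn(4)
assert torch.allclose(_slerp_sketch(_x0, _x1, 0.0), _x0, atol=1e-6)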
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
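# --- Hedged worked check of the two tests above (assumption: this solves
# Project Euler 207, which the (integer**2 - 1) / 4 partition candidate and
# the log2-based perfection test strongly suggest).
_k = (3**2 - 1) / 4  # 2.0 -> k = 2 is a valid partition for integer = 3
_e = math.log2(math.sqrt(4 * _k + 1) / 2 + 1 / 2)  # 1.0 -> and a perfect one (power of 2)
assert _k == int(_k) and _e == int(_e)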
| 653 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__lowerCamelCase : Any = random.Random()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=1.0 ,__magic_name__=None ,__magic_name__=None )-> List[str]:
"""simple docstring"""
if rng is None:
snake_case_ : List[Any] = global_rng
snake_case_ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int=7 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Tuple=2_000 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Any=0.0 , lowerCAmelCase__ :int=16_000 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :str=80 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Dict=64 , lowerCAmelCase__ :List[str]="hann_window" , lowerCAmelCase__ :List[Any]=80 , lowerCAmelCase__ :Optional[Any]=7_600 , lowerCAmelCase__ :Dict=1E-1_0 , lowerCAmelCase__ :int=True , ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : int = batch_size
snake_case_ : Dict = min_seq_length
snake_case_ : Any = max_seq_length
snake_case_ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case_ : int = feature_size
snake_case_ : Tuple = padding_value
snake_case_ : Tuple = sampling_rate
snake_case_ : Union[str, Any] = do_normalize
snake_case_ : int = num_mel_bins
snake_case_ : List[str] = hop_length
snake_case_ : str = win_length
snake_case_ : Optional[int] = win_function
snake_case_ : str = fmin
snake_case_ : List[Any] = fmax
snake_case_ : List[Any] = mel_floor
snake_case_ : Any = return_attention_mask
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _A ( self :Any , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :List[Any]=False ) -> int:
'''simple docstring'''
def _flatten(lowerCAmelCase__ :int ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
snake_case_ : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case_ : int = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ : Tuple = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def _A ( self :int , lowerCAmelCase__ :int=False , lowerCAmelCase__ :str=False ) -> Union[str, Any]:
'''simple docstring'''
if equal_length:
snake_case_ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case_ : List[str] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ : List[Any] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechTaFeatureExtractor
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : List[str] = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
snake_case_ : Tuple = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
snake_case_ : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
snake_case_ : List[str] = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
snake_case_ : List[Any] = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def _A ( self :Any ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : Dict = ["longest", "max_length", "do_not_pad"]
snake_case_ : Any = [None, 1_600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : int = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
snake_case_ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : List[Any] = range(800 , 1_400 , 200 )
snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
snake_case_ : Union[str, Any] = ["longest", "max_length", "do_not_pad"]
snake_case_ : List[str] = [None, 1_600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
snake_case_ : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : str = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1_000 , padding="max_length" , return_tensors="np" )
snake_case_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : List[str] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1_000 , padding="longest" , return_tensors="np" )
snake_case_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
snake_case_ : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : Optional[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2_000 , padding="longest" , return_tensors="np" )
snake_case_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def _A ( self :str ) -> int:
'''simple docstring'''
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : int = np.random.rand(100 ).astype(np.floataa )
snake_case_ : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ : Optional[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case_ : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
snake_case_ : Dict = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
snake_case_ : int = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case_ : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
snake_case_ : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
snake_case_ : List[str] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
snake_case_ : int = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case_ : int = np.asarray(lowerCAmelCase__ )
snake_case_ : Dict = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
snake_case_ : Union[str, Any] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def _A ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ : int = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : Tuple = feat_extract.model_input_names[0]
snake_case_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
snake_case_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
snake_case_ : int = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
snake_case_ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
snake_case_ : int = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : int = feat_extract.model_input_names[0]
snake_case_ : Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
snake_case_ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ : List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : int = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ : Tuple = feat_extract.model_input_names[0]
snake_case_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
snake_case_ : List[Any] = feat_extract.num_mel_bins # hack!
snake_case_ : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
snake_case_ : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.feat_extract_dict
snake_case_ : Tuple = True
snake_case_ : List[str] = self.feature_extraction_class(**lowerCAmelCase__ )
snake_case_ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ : Tuple = [len(lowerCAmelCase__ ) for x in speech_inputs]
snake_case_ : Tuple = feat_extract.model_input_names[0]
snake_case_ : Tuple = BatchFeature({input_name: speech_inputs} )
snake_case_ : str = feat_extract.num_mel_bins # hack!
snake_case_ : Union[str, Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.feat_extract_dict
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = self.feature_extraction_class(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
snake_case_ : str = [len(lowerCAmelCase__ ) for x in speech_inputs]
snake_case_ : int = feat_extract.model_input_names[0]
snake_case_ : Any = BatchFeature({input_name: speech_inputs} )
snake_case_ : List[Any] = min(lowerCAmelCase__ )
snake_case_ : str = feat_extract.num_mel_bins # hack!
snake_case_ : List[str] = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _A ( self :List[Any] , lowerCAmelCase__ :Optional[int] ) -> int:
'''simple docstring'''
from datasets import load_dataset
snake_case_ : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
snake_case_ : Dict = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _A ( self :str ) -> Any:
'''simple docstring'''
# fmt: off
snake_case_ : Union[str, Any] = torch.tensor(
[2.3_8_0_4E-0_3, 2.0_7_5_2E-0_3, 1.9_8_3_6E-0_3, 2.1_0_5_7E-0_3, 1.6_1_7_4E-0_3,
3.0_5_1_8E-0_4, 9.1_5_5_3E-0_5, 3.3_5_6_9E-0_4, 9.7_6_5_6E-0_4, 1.8_3_1_1E-0_3,
2.0_1_4_2E-0_3, 2.1_0_5_7E-0_3, 1.7_3_9_5E-0_3, 4.5_7_7_6E-0_4, -3.9_6_7_3E-0_4,
4.5_7_7_6E-0_4, 1.0_0_7_1E-0_3, 9.1_5_5_3E-0_5, 4.8_8_2_8E-0_4, 1.1_5_9_7E-0_3,
7.3_2_4_2E-0_4, 9.4_6_0_4E-0_4, 1.8_0_0_5E-0_3, 1.8_3_1_1E-0_3, 8.8_5_0_1E-0_4,
4.2_7_2_5E-0_4, 4.8_8_2_8E-0_4, 7.3_2_4_2E-0_4, 1.0_9_8_6E-0_3, 2.1_0_5_7E-0_3] )
# fmt: on
snake_case_ : Any = self._load_datasamples(1 )
snake_case_ : Optional[Any] = SpeechTaFeatureExtractor()
snake_case_ : List[Any] = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def _A ( self :Any ) -> Any:
'''simple docstring'''
# fmt: off
snake_case_ : Tuple = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
snake_case_ : Dict = self._load_datasamples(1 )
snake_case_ : List[str] = SpeechTaFeatureExtractor()
snake_case_ : Optional[Any] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
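# --- Self-contained sketch of the zero-mean/unit-variance property asserted
# throughout (assumption: the extractor normalizes each utterance as
# (x - mean) / sqrt(var + 1e-7), matching _check_zero_mean_unit_variance).
_x = np.random.rand(800).astype(np.float32)
_norm = (_x - _x.mean()) / np.sqrt(_x.var() + 1e-7)
assert abs(float(_norm.mean())) < 1e-3 and abs(float(_norm.var()) - 1) < 1e-3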
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
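# --- Hedged CLI sketch (script name and paths are illustrative). Beware the
# argparse `type=bool` gotcha above: any non-empty string, including "False",
# parses as True, so omit --push_to_hub to keep its default rather than
# passing an explicit value.
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted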
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''marian'''
a__ = ['''past_key_values''']
a__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :str , lowerCAmelCase__ :Dict=58_101 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_024 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :int=4_096 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :str=4_096 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :Dict=0.0 , lowerCAmelCase__ :Dict=0.0_2 , lowerCAmelCase__ :int=58_100 , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[str]=58_100 , lowerCAmelCase__ :Optional[int]=0 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :str=True , **lowerCAmelCase__ :Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case_ : Tuple = vocab_size
snake_case_ : str = decoder_vocab_size or vocab_size
snake_case_ : int = max_position_embeddings
snake_case_ : Tuple = d_model
snake_case_ : Optional[int] = encoder_ffn_dim
snake_case_ : Union[str, Any] = encoder_layers
snake_case_ : Optional[Any] = encoder_attention_heads
snake_case_ : List[str] = decoder_ffn_dim
snake_case_ : List[str] = decoder_layers
snake_case_ : Dict = decoder_attention_heads
snake_case_ : List[Any] = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : int = activation_dropout
snake_case_ : str = activation_function
snake_case_ : Any = init_std
snake_case_ : Dict = encoder_layerdrop
snake_case_ : str = decoder_layerdrop
snake_case_ : Tuple = use_cache
snake_case_ : Optional[Any] = encoder_layers
snake_case_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
class A_ (a_ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _A ( self :int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case_ : int = {0: "batch"}
snake_case_ : str = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
snake_case_ : Tuple = {0: "batch", 1: "decoder_sequence"}
snake_case_ : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ : Optional[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case_, snake_case_ : List[Any] = self.num_layers
for i in range(lowerCAmelCase__ ):
snake_case_ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
snake_case_ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
snake_case_ : List[str] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _A ( self :str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : str = super().outputs
else:
snake_case_ : Optional[int] = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
snake_case_, snake_case_ : List[str] = self.num_layers
for i in range(lowerCAmelCase__ ):
snake_case_ : int = {0: "batch", 2: "past_sequence + sequence"}
snake_case_ : int = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _A ( self :str , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
snake_case_ : Union[str, Any] = seq_length if not self.use_past else 1
snake_case_ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
snake_case_ : Union[str, Any] = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_, snake_case_ : Optional[Any] = common_inputs["input_ids"].shape
snake_case_ : Dict = common_inputs["decoder_input_ids"].shape[1]
snake_case_, snake_case_ : Optional[Any] = self.num_attention_heads
snake_case_ : List[str] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : List[Any] = decoder_seq_length + 3
snake_case_ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ : Dict = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
snake_case_ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case_, snake_case_ : str = self.num_layers
snake_case_ : List[str] = min(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : str = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
snake_case_ : Union[str, Any] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
snake_case_ : Tuple = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def _A ( self :str , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case_, snake_case_ : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case_ : str = seqlen + 2
snake_case_, snake_case_ : Tuple = self.num_layers
snake_case_, snake_case_ : Dict = self.num_attention_heads
snake_case_ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : Union[str, Any] = common_inputs["attention_mask"].dtype
snake_case_ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
snake_case_ : Any = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
def _A ( self :Any , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Any = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ : Any = tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
snake_case_ : Optional[Any] = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ : Any = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ : List[str] = dict(tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return common_inputs
def _A ( self :Union[str, Any] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
else:
snake_case_ : Tuple = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
return common_inputs
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Dict = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
snake_case_ : List[Any] = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@property
def _A ( self :Dict ) -> float:
'''simple docstring'''
return 1E-4
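# --- Hedged usage sketch, meant to be run outside this module (assumptions:
# upstream these classes are exposed as MarianConfig / MarianOnnxConfig, and
# the 1e-4 property above is OnnxConfig's `atol_for_validation`).
# from transformers import MarianConfig
# from transformers.models.marian.configuration_marian import MarianOnnxConfig
#
# onnx_cfg = MarianOnnxConfig(MarianConfig(), task="default")
# print(sorted(onnx_cfg.inputs))       # attention_mask, decoder_*, input_ids
# print(onnx_cfg.atol_for_validation)  # 0.0001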
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''roc_bert'''
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[Any] = use_cache
snake_case_ : Optional[Any] = enable_pronunciation
snake_case_ : List[Any] = enable_shape
snake_case_ : Optional[int] = pronunciation_embed_dim
snake_case_ : Dict = pronunciation_vocab_size
snake_case_ : int = shape_embed_dim
snake_case_ : Any = shape_vocab_size
snake_case_ : Optional[int] = concat_input
snake_case_ : List[Any] = position_embedding_type
snake_case_ : Any = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 1 |
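A hedged usage sketch for the config above, assuming it mirrors the released transformers RoCBertConfig (the class name in the snippet is an obfuscated alias):

from transformers import RoCBertConfig

# Defaults follow the signature above; override a couple of fields explicitly.
config = RoCBertConfig(hidden_size=768, num_hidden_layers=12)
print(config.model_type)  # roc_bert
print(config.vocab_size)  # 30522 by default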
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
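A readable restatement of the space-optimized bottom-up pass above, as a sketch with descriptive names; it allocates a fresh row buffer each pass so the current and next rows never alias, which the in-place variant above relies on avoiding implicitly.

def largest_square_side(rows: int, cols: int, mat: list) -> int:
    # next_row holds the dp values for row + 1; current_row is being filled.
    next_row = [0] * (cols + 1)
    best = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                # A square ending here extends the smallest of its
                # right, diagonal, and down neighbours by one.
                current_row[col] = 1 + min(
                    current_row[col + 1],  # right
                    next_row[col + 1],     # diagonal
                    next_row[col],         # down
                )
                best = max(best, current_row[col])
        next_row = current_row
    return best

assert largest_square_side(2, 2, [[1, 1], [1, 1]]) == 2
assert largest_square_side(3, 3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]) == 2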
'''simple docstring'''
import socket
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
snake_case_ : Any = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
snake_case_ : int = socket.gethostname()
snake_case_ : List[str] = 1_2312
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file" ,"wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
snake_case_ : int = sock.recv(1024 )
if not data:
break
out_file.write(data )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
| 653 |
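A minimal companion server sketch for the client above, assuming the same convention (port 12312 on the local hostname) and a file named send_file to stream; it is illustrative only, not part of the original module.

import socket

def serve_file(filename: str = "send_file", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)  # accept a single client
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    print(conn.recv(1024).decode())  # the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.shutdown(socket.SHUT_WR)  # signal EOF so the client's recv loop ends
    conn.close()
    server.close()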
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=7 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case_ : Dict = "636036"
snake_case_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case_ : Optional[Any] = requests.get(__magic_name__ ,headers=__magic_name__ ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = get_daily_ci_runs(__magic_name__ )
snake_case_ : Optional[int] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case_ : Dict = workflow_run["id"]
break
return workflow_run_id
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
snake_case_ : Union[str, Any] = get_artifacts_links(worflow_run_id=__magic_name__ ,token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case_ : Union[str, Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ ,artifact_url=__magic_name__ ,output_dir=__magic_name__ ,token=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = {}
for artifact_name in artifact_names:
snake_case_ : Any = os.path.join(__magic_name__ ,F'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
snake_case_ : Tuple = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
snake_case_ : Optional[Any] = f.read().decode("UTF-8" )
return results
| 653 | 1 |
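A hedged usage sketch for the helpers above. It assumes the snippet is the transformers utility get_previous_daily_ci.py and that the outer functions are named get_daily_ci_runs, get_last_daily_ci_artifacts, and get_last_daily_ci_reports (only their internal calls are visible here); a GitHub token is required.

import os

from get_previous_daily_ci import get_last_daily_ci_reports  # assumed module name

token = os.environ.get("GITHUB_TOKEN")           # assumed env var
artifact_names = ["ci_results_run_models_gpu"]   # illustrative artifact name
output_dir = "previous_ci"
os.makedirs(output_dir, exist_ok=True)

# Downloads the artifacts of the last completed scheduled run on `main`,
# then reads each text file inside every zip into {artifact: {file: text}}.
results = get_last_daily_ci_reports(artifact_names, output_dir, token)
for name, files in results.items():
    print(name, sorted(files))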
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : List[str] = 1.6021E-19 # units = C
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,)-> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
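A worked check of the sigma = n * e * mu relation the solver above implements, inlined so it runs standalone (the input values are illustrative):

ELECTRON_CHARGE = 1.6021e-19  # C, same constant as above

conductivity = 0.02   # S/m
electron_conc = 1e19  # 1/m^3

# With two quantities known, the third follows from sigma = n * e * mu.
mobility = conductivity / (electron_conc * ELECTRON_CHARGE)
print(f"mobility = {mobility:.6f} m^2/(V*s)")  # ~0.012484

# Round trip: plugging the result back reproduces the conductivity.
assert abs(electron_conc * ELECTRON_CHARGE * mobility - conductivity) < 1e-12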
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : List[str] = "THE GERMAN ATTACK"
snake_case_ : List[str] = "SECRET"
snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 653 | 1 |
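A round-trip sketch using the public names visible above (generate_key, cipher_text, original_text); run it in the same module as the definitions, and note the scheme assumes uppercase A-Z plus spaces. The (message, keyword) argument order mirrors the main() demo and is an assumption.

message = "ATTACK AT DAWN"
keyword = "LEMON"

key_stream = generate_key(message, keyword)      # keyword cycled to message length
encrypted = cipher_text(message, key_stream)
decrypted = original_text(encrypted, key_stream)

print(f"Encrypted: {encrypted}")
assert decrypted == message                      # decryption inverts encryption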
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
__lowerCamelCase : Any = None
__lowerCamelCase : int = {
'''7B''': 11008,
'''13B''': 13824,
'''30B''': 17920,
'''65B''': 22016,
'''70B''': 28672,
}
__lowerCamelCase : List[str] = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=1 ,__magic_name__=256 )-> List[Any]:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
with open(__magic_name__ ,"r" ) as f:
return json.load(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
with open(__magic_name__ ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=True )-> Union[str, Any]:
"""simple docstring"""
os.makedirs(__magic_name__ ,exist_ok=__magic_name__ )
snake_case_ : List[Any] = os.path.join(__magic_name__ ,"tmp" )
os.makedirs(__magic_name__ ,exist_ok=__magic_name__ )
snake_case_ : List[str] = read_json(os.path.join(__magic_name__ ,"params.json" ) )
snake_case_ : Optional[Any] = NUM_SHARDS[model_size]
snake_case_ : Optional[Any] = params["n_layers"]
snake_case_ : Tuple = params["n_heads"]
snake_case_ : int = n_heads // num_shards
snake_case_ : Any = params["dim"]
snake_case_ : List[Any] = dim // n_heads
snake_case_ : List[Any] = 10_000.0
snake_case_ : int = 1.0 / (base ** (torch.arange(0 ,__magic_name__ ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
snake_case_ : Union[str, Any] = params["n_kv_heads"] # for GQA / MQA
snake_case_ : Dict = n_heads_per_shard // num_key_value_heads
snake_case_ : Union[str, Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
snake_case_ : Dict = n_heads
snake_case_ : Any = n_heads_per_shard
snake_case_ : Optional[Any] = dim
# permute for sliced rotary
def permute(__magic_name__ ,__magic_name__=n_heads ,__magic_name__=dim ,__magic_name__=dim ):
return w.view(__magic_name__ ,dima // n_heads // 2 ,2 ,__magic_name__ ).transpose(1 ,2 ).reshape(__magic_name__ ,__magic_name__ )
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case_ : Dict = torch.load(os.path.join(__magic_name__ ,"consolidated.00.pth" ) ,map_location="cpu" )
else:
# Sharded
snake_case_ : List[Any] = [
torch.load(os.path.join(__magic_name__ ,F'''consolidated.{i:02d}.pth''' ) ,map_location="cpu" )
for i in range(__magic_name__ )
]
snake_case_ : List[str] = 0
snake_case_ : Any = {"weight_map": {}}
for layer_i in range(__magic_name__ ):
snake_case_ : Optional[Any] = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : Any = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case_ : List[str] = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
snake_case_ : List[Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(__magic_name__ ,__magic_name__ ,__magic_name__ )
for i in range(__magic_name__ )
] ,dim=0 ,).reshape(__magic_name__ ,__magic_name__ ) )
snake_case_ : Tuple = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
__magic_name__ ,__magic_name__ ,__magic_name__ )
for i in range(__magic_name__ )
] ,dim=0 ,).reshape(__magic_name__ ,__magic_name__ ) ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)
snake_case_ : Dict = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
__magic_name__ ,__magic_name__ ,__magic_name__ )
for i in range(__magic_name__ )
] ,dim=0 ,).reshape(__magic_name__ ,__magic_name__ )
snake_case_ : List[Any] = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(__magic_name__ )] ,dim=1 )
snake_case_ : int = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(__magic_name__ )] ,dim=0 )
snake_case_ : Any = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(__magic_name__ )] ,dim=1 )
snake_case_ : int = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(__magic_name__ )] ,dim=0 )
snake_case_ : Union[str, Any] = inv_freq
for k, v in state_dict.items():
snake_case_ : Optional[Any] = filename
param_count += v.numel()
torch.save(__magic_name__ ,os.path.join(__magic_name__ ,__magic_name__ ) )
snake_case_ : List[str] = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : Optional[int] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
snake_case_ : Optional[int] = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(__magic_name__ )] ,dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(__magic_name__ )] ,dim=0 ),
}
for k, v in state_dict.items():
snake_case_ : Dict = filename
param_count += v.numel()
torch.save(__magic_name__ ,os.path.join(__magic_name__ ,__magic_name__ ) )
# Write configs
snake_case_ : List[Any] = {"total_size": param_count * 2}
write_json(__magic_name__ ,os.path.join(__magic_name__ ,"pytorch_model.bin.index.json" ) )
snake_case_ : Tuple = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
snake_case_ : Tuple = params["multiple_of"] if "multiple_of" in params else 256
snake_case_ : Tuple = LlamaConfig(
hidden_size=__magic_name__ ,intermediate_size=compute_intermediate_size(__magic_name__ ,__magic_name__ ,__magic_name__ ) ,num_attention_heads=params["n_heads"] ,num_hidden_layers=params["n_layers"] ,rms_norm_eps=params["norm_eps"] ,num_key_value_heads=__magic_name__ ,)
config.save_pretrained(__magic_name__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
snake_case_ : Union[str, Any] = LlamaForCausalLM.from_pretrained(__magic_name__ ,torch_dtype=torch.float16 ,low_cpu_mem_usage=__magic_name__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(__magic_name__ ,safe_serialization=__magic_name__ )
shutil.rmtree(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : int = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
snake_case_ : List[str] = tokenizer_class(__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
def __UpperCAmelCase ( )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" ,help="Location of LLaMA weights, which contains tokenizer.model and model folders" ,)
parser.add_argument(
"--model_size" ,choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] ,)
parser.add_argument(
"--output_dir" ,help="Location to write HF model and tokenizer" ,)
parser.add_argument("--safe_serialization" ,type=__magic_name__ ,help="Whether or not to save using `safetensors`." )
snake_case_ : Tuple = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
snake_case_ : Tuple = os.path.join(args.input_dir ,"tokenizer.model" )
write_tokenizer(args.output_dir ,__magic_name__ )
if __name__ == "__main__":
main()
| 653 |
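A worked check of compute_intermediate_size above: plugging in the LLaMA hidden sizes reproduces the intermediate sizes listed in the table at the top of the script (default multiplier 1, multiple_of 256).

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # 8n/3, scaled by the multiplier, rounded up to a multiple_of boundary.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert compute_intermediate_size(4096) == 11008  # 7B
assert compute_intermediate_size(5120) == 13824  # 13B
assert compute_intermediate_size(8192) == 22016  # 65B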
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
snake_case_ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
snake_case_ : str = model.state_dict()
def to_tf_var_name(__magic_name__ ):
for patt, repl in iter(__magic_name__ ):
snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
return F'''bert/{name}'''
def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__magic_name__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
snake_case_ : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
snake_case_ : List[Any] = torch_tensor.T
snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = session.run(__magic_name__ )
print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
snake_case_ : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
| 653 | 1 |
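A self-contained trace of the variable-name translation above (the substitution pairs applied in order, then prefixed with bert/); this reproduces the mapping logic without TensorFlow installed.

VAR_MAP = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    # Apply each (pattern, replacement) pair in order, as the script does.
    for patt, repl in VAR_MAP:
        name = name.replace(patt, repl)
    return f"bert/{name}"

assert to_tf_var_name("encoder.layer.0.attention.self.query.weight") == \
    "bert/encoder/layer_0/attention/self/query/kernel"
assert to_tf_var_name("embeddings.LayerNorm.bias") == "bert/embeddings/LayerNorm/beta"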
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
__lowerCamelCase : List[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
__lowerCamelCase : Union[str, Any] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
__lowerCamelCase : Union[str, Any] = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :int ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Any="uniform_average" , lowerCAmelCase__ :List[Any]=True ) -> Dict:
'''simple docstring'''
snake_case_ : str = mean_squared_error(
lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ )
return {"mse": mse}
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCAmelCase__ )
snake_case_ : Tuple = self.values[key]
def _A ( self :int ) -> Dict:
'''simple docstring'''
return (
sum(self.charge_factor - len(slot ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
):
return key
return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
| 653 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A_ (a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :float , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Dict = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Tuple = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = False
snake_case_ : Union[str, Any] = nn.Dropout(p=lowerCAmelCase__ )
snake_case_ : List[str] = T5Config(
vocab_size=lowerCAmelCase__ , d_model=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ , feed_forward_proj=lowerCAmelCase__ , is_decoder=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , )
snake_case_ : List[str] = nn.ModuleList()
for lyr_num in range(lowerCAmelCase__ ):
snake_case_ : Any = T5Block(lowerCAmelCase__ )
self.encoders.append(lowerCAmelCase__ )
snake_case_ : int = T5LayerNorm(lowerCAmelCase__ )
snake_case_ : Any = nn.Dropout(p=lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.token_embedder(lowerCAmelCase__ )
snake_case_ : Optional[int] = encoder_input_tokens.shape[1]
snake_case_ : Tuple = torch.arange(lowerCAmelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase__ )
snake_case_ : List[str] = self.dropout_pre(lowerCAmelCase__ )
# inverted the attention mask
snake_case_ : Union[str, Any] = encoder_input_tokens.size()
snake_case_ : Tuple = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ )
for lyr in self.encoders:
snake_case_ : List[str] = lyr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
snake_case_ : Dict = self.layer_norm(lowerCAmelCase__ )
return self.dropout_post(lowerCAmelCase__ ), encoder_inputs_mask
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 653 | 1 |
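A usage sketch for the open-addressing map above; the repr suggests the class is exported as HashMap, so that name is assumed here (in the snippet it appears under an obfuscated alias).

hm = HashMap()

# Inserts resize the bucket array automatically once the load factor is exceeded.
for i in range(20):
    hm[f"key{i}"] = i * i

assert len(hm) == 20
assert hm["key7"] == 49

del hm["key7"]  # replaced by the _deleted tombstone, not removed in place
assert "key7" not in list(hm)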
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
__lowerCamelCase : Any = False
__lowerCamelCase : int = True
__lowerCamelCase : int = False
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCamelCase : Dict = parser.parse_args()
__lowerCamelCase : List[str] = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
__lowerCamelCase : Optional[int] = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
__lowerCamelCase : Union[str, Any] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
__lowerCamelCase : Any = reader.read()
__lowerCamelCase : Dict = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
__lowerCamelCase : List[Any] = UNet2DModel(**config)
else:
__lowerCamelCase : int = UNet2DConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNet2DModel
__lowerCamelCase : int = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowerCamelCase : List[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowerCamelCase : List[str] = config[key]
del config[key]
__lowerCamelCase : List[str] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
__lowerCamelCase : List[Any] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
__lowerCamelCase : Any = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
__lowerCamelCase : List[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
__lowerCamelCase : str = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
__lowerCamelCase : Optional[int] = param_value
__lowerCamelCase : str = True
if not has_changed:
__lowerCamelCase : Union[str, Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_bigcode'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : str = activation_function
snake_case_ : Union[str, Any] = resid_pdrop
snake_case_ : Optional[Any] = embd_pdrop
snake_case_ : Any = attn_pdrop
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : int = scale_attn_weights
snake_case_ : Union[str, Any] = use_cache
snake_case_ : Dict = attention_softmax_in_fp32
snake_case_ : Any = scale_attention_softmax_in_fp32
snake_case_ : List[str] = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Any = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 1 |
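A hedged instantiation sketch, assuming the config above mirrors the released transformers GPTBigCodeConfig:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12, multi_query=True)
print(config.model_type)   # gpt_bigcode
print(config.hidden_size)  # 768, resolved through the attribute_map alias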
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=7 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case_ : Dict = "636036"
snake_case_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case_ : Optional[Any] = requests.get(__magic_name__ ,headers=__magic_name__ ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = get_daily_ci_runs(__magic_name__ )
snake_case_ : Optional[int] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case_ : Dict = workflow_run["id"]
break
return workflow_run_id
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
snake_case_ : Union[str, Any] = get_artifacts_links(worflow_run_id=__magic_name__ ,token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case_ : Union[str, Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ ,artifact_url=__magic_name__ ,output_dir=__magic_name__ ,token=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = {}
for artifact_name in artifact_names:
snake_case_ : Any = os.path.join(__magic_name__ ,F'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
snake_case_ : Tuple = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
snake_case_ : Optional[Any] = f.read().decode("UTF-8" )
return results
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ (a_ ):
"""simple docstring"""
a__ = (DPMSolverSinglestepScheduler,)
a__ = (('''num_inference_steps''', 25),)
def _A ( self :Dict , **lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**lowerCAmelCase__ )
return config
def _A ( self :int , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = dict(self.forward_default_kwargs )
snake_case_ : Dict = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
snake_case_ : Any = self.dummy_sample
snake_case_ : Union[str, Any] = 0.1 * sample
snake_case_ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ : List[str] = self.get_scheduler_config(**lowerCAmelCase__ )
snake_case_ : int = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
snake_case_ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
snake_case_ : Tuple = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
snake_case_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_, snake_case_ : int = sample, sample
for t in range(lowerCAmelCase__ , time_step + scheduler.config.solver_order + 1 ):
snake_case_ : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
snake_case_ : Dict = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=0 , **lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = dict(self.forward_default_kwargs )
snake_case_ : Tuple = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
snake_case_ : List[str] = self.dummy_sample
snake_case_ : Tuple = 0.1 * sample
snake_case_ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
snake_case_ : int = scheduler_class.from_pretrained(lowerCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ : List[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
snake_case_ : str = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=None , **lowerCAmelCase__ :Any ) -> str:
'''simple docstring'''
if scheduler is None:
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config(**lowerCAmelCase__ )
snake_case_ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : int = self.get_scheduler_config(**lowerCAmelCase__ )
snake_case_ : List[Any] = scheduler_class(**lowerCAmelCase__ )
snake_case_ : Union[str, Any] = 10
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case_ : Union[str, Any] = 50
snake_case_ : Optional[Any] = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
snake_case_ : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Dict = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
snake_case_ : str = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_7_9_1) < 1E-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_2_4_8) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1_4_5_3) < 1E-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0_6_4_9) < 1E-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
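

# Illustrative sketch (an addition, separate from the tests above): the
# `from_config` round-trip those tests exercise lets the singlestep and
# multistep solvers share one configuration.
def _config_roundtrip_sketch():
    base = DPMSolverSinglestepScheduler(num_train_timesteps=1_000)
    swapped = UniPCMultistepScheduler.from_config(base.config)
    assert swapped.config.num_train_timesteps == 1_000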
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1E-4)
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))

        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))

        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 653 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
"""simple docstring"""
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size to `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to predict a segmentation mask from the `text` prompt
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 653 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as an integer")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
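    # Illustrative check (an addition, not in the original script): the first
    # fifteen numbers yield the classic FizzBuzz sequence.
    assert fizz_buzz(1, 15).split() == [
        "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz",
        "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
    ]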
| 653 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names

question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['''validation''']

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
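

# Companion sketch (an addition, not part of the script above): one way to
# produce the `--onnx_model_path` file this script consumes. The fine-tuned
# QA checkpoint name and the opset version are assumptions for illustration.
def export_qa_model_to_onnx(model_name="csarron/bert-base-uncased-squad-v1", path="bert-qa.onnx"):
    from transformers import AutoModelForQuestionAnswering

    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    model.eval()
    dummy = torch.ones(1, 384, dtype=torch.int64)  # matches the --max_seq_length default
    torch.onnx.export(
        model,
        (dummy, dummy, dummy),  # input_ids, attention_mask, token_type_ids
        path,
        input_names=["input_ids", "attention_mask", "token_type_ids"],
        output_names=["start_logits", "end_logits"],
        opset_version=13,
    )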
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
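

# Standalone sketch (an addition) of the `find_executable_batch_size` pattern
# used above, with a simulated OOM so it can run on CPU; the sizes are
# contrived for illustration.
def _decorator_demo():
    @find_executable_batch_size(starting_batch_size=64)
    def run(batch_size):
        if batch_size > 16:
            # accelerate recognizes this message and halves the batch size
            raise RuntimeError("CUDA out of memory. (simulated)")
        return batch_size

    return run()  # retries 64 -> 32 -> 16 and returns 16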
| 653 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_4_6_4) < 1E-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_4_6_4) < 1E-3
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCAmelCase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _A ( self :Dict ) -> str:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , )
snake_case_ : Optional[int] = self.full_loop(
solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , )
assert not torch.isnan(lowerCAmelCase__ ).any(), "Samples have nan numbers"
def _A ( self :str ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCAmelCase__ )
self.check_over_configs(lower_order_final=lowerCAmelCase__ )
def _A ( self :str ) -> str:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase__ , time_step=0 )
def _A ( self :List[Any] ) -> int:
'''simple docstring'''
snake_case_ : Any = self.full_loop()
snake_case_ : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.full_loop(prediction_type="v_prediction" )
snake_case_ : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Any = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config(thresholding=lowerCAmelCase__ , dynamic_thresholding_ratio=0 )
snake_case_ : Optional[int] = scheduler_class(**lowerCAmelCase__ )
snake_case_ : str = 10
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
        assert sample.dtype == torch.float16
def _A ( self :Dict , **lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
snake_case_ : Optional[int] = self.get_scheduler_config(**lowerCAmelCase__ )
snake_case_ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
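# Hedged usage sketch (separate from the test class above): diffusers
# multistep schedulers share a compatible config schema, which is what the
# `from_config` scheduler swaps exercised earlier rely on. The step count
# below is illustrative only.
from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

unipc = UniPCMultistepScheduler(num_train_timesteps=1_000)
dpm = DPMSolverMultistepScheduler.from_config(unipc.config)
unipc_roundtrip = UniPCMultistepScheduler.from_config(dpm.config)
assert unipc_roundtrip.config.num_train_timesteps == 1_000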
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
"""simple docstring"""
a__ = '''facebook/bart-large-mnli'''
a__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
a__ = '''text_classifier'''
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ['''text''', ['''text''']]
a__ = ['''text''']
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().setup()
        config : Optional[int] = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Tuple = labels
return self.pre_processor(
[text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = outputs.logits
snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
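# Hedged sketch of the zero-shot recipe the tool above implements: pair the
# text with one "This example is {label}" hypothesis per candidate label,
# score the pairs with the NLI checkpoint named above, and return the label
# whose pair has the highest entailment logit. The text and labels are
# illustrative; the entailment column index follows this checkpoint's label
# order (contradiction, neutral, entailment).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
text, labels = "I loved this movie", ["positive", "negative"]
inputs = tokenizer(
    [text] * len(labels),
    [F'''This example is {label}''' for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits
best = int(torch.argmax(logits[:, 2]))  # pick the pair with the highest entailment score
print(labels[best])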
| 653 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : int = {"unk_token": "<unk>"}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = "A, <mask> AllenNLP sentence."
snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _A ( self :int ) -> Tuple:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
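# Hedged illustration of the offset-trimming behaviour asserted above
# (requires downloading the roberta-base checkpoint; the expected offsets
# are shown in the trailing comment).
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # expected [(0, 5), (6, 11)]: the leading space is trimmed from the second span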
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ViTFeatureExtractor''']
__lowerCamelCase : Any = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
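# The init above registers names lazily so the heavy torch/tf/flax imports
# only happen on first attribute access. A minimal sketch of the same idea
# using PEP 562 module-level __getattr__ (independent of the internals of
# transformers' `_LazyModule`, which are not shown here); assumes it lives in
# a package __init__ so the relative import resolves:
import importlib

_LAZY_ATTRS = {"ViTModel": ".modeling_vit", "ViTConfig": ".configuration_vit"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(F'''module {__name__!r} has no attribute {name!r}''')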
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( separator: str ,separated: list )-> str:
    """simple docstring"""
    joined : Union[str, Any] = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase ,str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
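# Usage sketch for the join helper above (values illustrative); the trailing
# separator appended on the last iteration is removed by the final strip:
assert __UpperCAmelCase("-", ["a", "b", "c"]) == "a-b-c"
assert __UpperCAmelCase(" ", ["You", "are", "amazing!"]) == "You are amazing!"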
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs : Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict : Tuple = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([[1, 2]] )
snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ : Any = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
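# Hedged sketch of the input contract the integration test above exercises:
# LayoutLM-family models take one (x0, y0, x1, y1) box per token, normalised
# to a 0-1000 grid. The helper below is illustrative and not part of the
# test file.
def normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1_000 * x0 / width),
        int(1_000 * y0 / height),
        int(1_000 * x1 / width),
        int(1_000 * y1 / height),
    ]

assert normalize_box((15, 30, 45, 60), width=300, height=600) == [50, 50, 150, 100]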
| 653 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection( function: Callable[[float], float] ,a: float ,b: float )-> float:
    """simple docstring"""
    start : float = a
    end : float = b
    if function(a ) == 0: # one of a or b is already a root of the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ): # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # iterate until the bracket width drops below 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x: float )-> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
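# Further usage sketch: the same routine brackets sqrt(2) as the positive
# root of x**2 - 2 on [1, 2] (illustrative, to within the 10^-7 tolerance):
assert abs(bisection(lambda x: x**2 - 2, 1, 2) - 2**0.5) < 10**-6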
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( i )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
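# Hedged standalone sketch of the API exercised above: `parallel_backend`
# routes `map_nested` through a joblib backend such as joblibspark ("spark").
# Requires pyspark and joblibspark to be installed; values are illustrative.
def add_one(i):  # must be picklable, as in the test above
    return i + 1

with parallel_backend("spark"):
    assert map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2) == {"a": [2, 3], "b": [4, 5]}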
| 653 | 1 |
'''simple docstring'''
def binary_recursive( decimal: int )-> str:
    """simple docstring"""
    decimal : int = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal ,2 )
    return binary_recursive(div ) + str(mod )
def __UpperCAmelCase ( number: str )-> str:
    """simple docstring"""
    number : str = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative : str = "-" if number.startswith("-" ) else ""
    number : str = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
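# Expected behaviour of the converter above (illustrative checks; the public
# entry point strips whitespace, preserves a leading "-", and prefixes "0b"):
assert binary_recursive(19) == "10011"
assert __UpperCAmelCase("-19") == "-0b10011"
assert __UpperCAmelCase(" 0 ") == "0b0"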
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''esm'''
def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : str = emb_layer_norm_before
snake_case_ : List[Any] = token_dropout
snake_case_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
snake_case_ : Optional[Any] = EsmFoldConfig()
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
snake_case_ : List[str] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : List[Any] = None
snake_case_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase__ ):
snake_case_ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = None
a__ = True
a__ = False
a__ = False
a__ = False
a__ = 0
a__ = True
a__ = False
a__ = 128
a__ = None
def _A ( self :Dict ) -> int:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase__ ):
snake_case_ : int = TrunkConfig(**self.trunk )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = asdict(self )
snake_case_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 48
a__ = 1024
a__ = 128
a__ = 32
a__ = 32
a__ = 32
a__ = 0
a__ = 0
a__ = False
a__ = 4
a__ = 128
a__ = None
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase__ ):
snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : Dict = self.structure_module.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 384
a__ = 128
a__ = 16
a__ = 128
a__ = 12
a__ = 4
a__ = 8
a__ = 0.1
a__ = 8
a__ = 1
a__ = 2
a__ = 7
a__ = 10
a__ = 1E-8
a__ = 1E5
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return asdict(self )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
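# Hedged toy version of the nested-config pattern used throughout this file:
# each dataclass normalises a dict (or None) into its sub-config object in
# __post_init__ and expands it back into a plain dict in to_dict(). The names
# below are illustrative, not part of the ESM config.
@dataclass
class _Inner:
    width: int = 8

@dataclass
class _Outer:
    inner: object = None

    def __post_init__(self):
        if self.inner is None:
            self.inner = _Inner()
        elif isinstance(self.inner, dict):
            self.inner = _Inner(**self.inner)

    def to_dict(self):
        output = asdict(self)
        output["inner"] = asdict(self.inner)
        return output

assert _Outer(inner={"width": 16}).to_dict() == {"inner": {"width": 16}}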
| 653 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class A_ (a_ ):
"""simple docstring"""
a__ = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ = Features({'''image''': Image()} )
a__ = Features({'''labels''': ClassLabel} )
a__ = "image"
a__ = "labels"
def _A ( self :Optional[int] , lowerCAmelCase__ :List[str] ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
snake_case_ : List[str] = copy.deepcopy(self )
snake_case_ : Dict = self.label_schema.copy()
snake_case_ : str = features[self.label_column]
snake_case_ : Tuple = label_schema
return task_template
@property
def _A ( self :Union[str, Any] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
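# Hedged usage sketch: in the datasets library this template is exposed as
# `ImageClassification` and the alignment method above as
# `align_with_features`; the feature names below are illustrative.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification().align_with_features(features)
assert template.label_schema["labels"].names == ["cat", "dog"]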
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ (a_ ):
"""simple docstring"""
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''BlipImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = False
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Any = self.image_processor
def __call__( self :Dict , lowerCAmelCase__ :ImageInput = None , lowerCAmelCase__ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ :Optional[int] , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
snake_case_ : Tuple = self.tokenizer
snake_case_ : str = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
return text_encoding
# add pixel_values
snake_case_ : List[str] = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
if text is not None:
snake_case_ : int = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
snake_case_ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase__ )
return encoding_image_processor
def _A ( self :Tuple , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Dict , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer.model_input_names
snake_case_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
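# Hedged toy version of the routing logic the processor above implements:
# text goes to the tokenizer, images to the image processor, and when both
# are given the two encodings are merged into one mapping. Illustrative only,
# independent of the Blip classes.
class _ToyProcessor:
    def __init__(self, image_processor, tokenizer):
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, images=None, text=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        if images is None:
            return self.tokenizer(text, **kwargs)
        encoding = self.image_processor(images, **kwargs)
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        return encoding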
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = d_model
snake_case_ : Dict = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[Any] = prediction_length
snake_case_ : str = context_length
snake_case_ : Tuple = cardinality
snake_case_ : List[str] = num_time_features
snake_case_ : Optional[Any] = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : Optional[Any] = is_training
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = context_length
snake_case_ : Any = prediction_length + label_length
snake_case_ : Union[str, Any] = label_length
snake_case_ : List[Any] = moving_average
snake_case_ : str = autocorrelation_factor
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = config.context_length + max(config.lags_sequence )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : int = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_config()
snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
snake_case_ : Any = outputs.encoder_last_hidden_state
snake_case_ : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[Any] = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case_ : List[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
snake_case_ : Any = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case_ : List[str] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case_ : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case_ : Any = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_ : Tuple = decoder(
trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
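# Background sketch (illustration only, not part of the test): Autoformer's `decomposition_layer`
# splits a series x into a moving-average trend and a seasonal residual, roughly:
#   trend = torch.nn.functional.avg_pool1d(x.transpose(1, 2), kernel_size=k, stride=1, padding=k // 2).transpose(1, 2)
#   seasonal = x - trend
# (`k` is a hypothetical kernel size); this yields the (seasonal_input, trend_input) pair used above.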
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = AutoformerModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _A ( self :str ) -> str:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Dict = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
snake_case_ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : Any = True
snake_case_ : Any = False
snake_case_ : Dict = True
snake_case_ : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Optional[int] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : str = outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case_ : Tuple = len(lowerCAmelCase__ )
snake_case_ : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# decoder attentions
snake_case_ : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = True
snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
"""simple docstring"""
snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ )
return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : List[str] = prepare_batch()
with torch.no_grad():
snake_case_ : int = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
snake_case_ : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Optional[Any] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Tuple = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : str = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Optional[Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = 10
snake_case_ : Any = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
snake_case_ : str = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(__magic_name__ ) ),
} ,features=__magic_name__ ,)
return dataset
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Dict = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=__magic_name__ )
return filename
# FILE_CONTENT + files
__lowerCamelCase : Dict = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt"
snake_case_ : Dict = FILE_CONTENT
with open(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ )
return filename
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
import bz2
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
snake_case_ : List[Any] = bytes(__magic_name__ ,"utf-8" )
with bz2.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> List[str]:
"""simple docstring"""
import gzip
snake_case_ : Any = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
snake_case_ : Optional[Any] = bytes(__magic_name__ ,"utf-8" )
with gzip.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
snake_case_ : Any = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
snake_case_ : List[Any] = bytes(__magic_name__ ,"utf-8" )
with lz4.frame.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
snake_case_ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with py7zr.SevenZipFile(__magic_name__ ,"w" ) as archive:
archive.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
import tarfile
snake_case_ : int = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(__magic_name__ ,"w" ) as f:
f.add(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
import lzma
snake_case_ : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
snake_case_ : Any = bytes(__magic_name__ ,"utf-8" )
with lzma.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
import zipfile
snake_case_ : Any = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
snake_case_ : List[Any] = bytes(__magic_name__ ,"utf-8" )
with zstd.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.xml"
snake_case_ : List[str] = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ )
return filename
__lowerCamelCase : str = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCamelCase : Dict = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCamelCase : List[str] = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCamelCase : List[str] = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCamelCase : Tuple = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Dict = datasets.Dataset.from_dict(__magic_name__ )
snake_case_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlite3.connect(__magic_name__ ) ) as con:
snake_case_ : Optional[int] = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(__magic_name__ ,"w" ,newline="" ) as f:
snake_case_ : Any = csv.DictWriter(__magic_name__ ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(__magic_name__ ,"w" ,newline="" ) as f:
snake_case_ : Dict = csv.DictWriter(__magic_name__ ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
import bz2
snake_case_ : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(__magic_name__ ,"rb" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(__magic_name__ ,"wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(csv_path.replace(".csv" ,".CSV" ) ) )
f.write(__magic_name__ ,arcname=os.path.basename(csva_path.replace(".csv" ,".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
snake_case_ : int = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(__magic_name__ ,"wb" ) as f:
snake_case_ : Any = pq.ParquetWriter(__magic_name__ ,schema=__magic_name__ )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__magic_name__ ) )] for k in DATA[0]} ,schema=__magic_name__ )
writer.write_table(__magic_name__ )
writer.close()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
snake_case_ : Optional[int] = {"data": DATA}
with open(__magic_name__ ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
snake_case_ : Dict = {"data": DATA_DICT_OF_LISTS}
with open(__magic_name__ ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(__magic_name__ ,"w" ) as f:
for item in DATA:
f.write(json.dumps(__magic_name__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(__magic_name__ ,"w" ) as f:
for item in DATA:
f.write(json.dumps(__magic_name__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(__magic_name__ ,"w" ) as f:
for item in DATA_312:
f.write(json.dumps(__magic_name__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Any = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(__magic_name__ ,"w" ) as f:
for item in DATA_STR:
f.write(json.dumps(__magic_name__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
import gzip
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(__magic_name__ ,"rb" ) as orig_file:
with gzip.open(__magic_name__ ,"wb" ) as zipped_file:
zipped_file.writelines(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
import gzip
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(__magic_name__ ,"rb" ) as orig_file:
with gzip.open(__magic_name__ ,"wb" ) as zipped_file:
zipped_file.writelines(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.join("nested" ,os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : List[str] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(__magic_name__ ,"w" ) as f:
f.add(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
f.add(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(__magic_name__ ,"w" ) as f:
f.add(__magic_name__ ,arcname=os.path.join("nested" ,os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = ["0", "1", "2", "3"]
snake_case_ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(__magic_name__ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Tuple = ["0", "1", "2", "3"]
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(__magic_name__ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Optional[Any] = ["0", "1", "2", "3"]
snake_case_ : int = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(__magic_name__ ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : int = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : int = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
f.write(__magic_name__ ,arcname=os.path.join("main_dir" ,os.path.basename(__magic_name__ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename("unsupported.ext" ) )
f.write(__magic_name__ ,arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Union[str, Any] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(__magic_name__ ,"w" ,encoding="utf-8" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( )-> Union[str, Any]:
"""simple docstring"""
return os.path.join("tests" ,"features" ,"data" ,"test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( )-> Union[str, Any]:
"""simple docstring"""
return os.path.join("tests" ,"features" ,"data" ,"test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(__magic_name__ ,"w" ) as f:
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ) )
f.write(__magic_name__ ,arcname=os.path.basename(__magic_name__ ).replace(".jpg" ,"2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : int = {"unk_token": "<unk>"}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = "A, <mask> AllenNLP sentence."
snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _A ( self :int ) -> Tuple:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 653 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
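# A typical invocation (assumed example; `this_script.py` is a placeholder, and the flags
# match the argparse options defined at the bottom of this file):
#   accelerate launch this_script.py --mixed_precision fp16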
__lowerCamelCase : Tuple = 16
__lowerCamelCase : int = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : Tuple = load_dataset("glue" ,"mrpc" )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : List[Any] = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : int = datasets.map(
__magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : int = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : int = 16
elif accelerator.mixed_precision != "no":
snake_case_ : Optional[Any] = 8
else:
snake_case_ : Any = None
return tokenizer.pad(
__magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)
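# Explanatory aside (not from the original script): fp16 tensor-core kernels on NVIDIA GPUs
# are fastest when padded lengths are multiples of 8, and fp8 kernels prefer multiples of 16,
# which is why those pad_to_multiple_of values are chosen above.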
# Instantiate dataloaders.
snake_case_ : str = DataLoader(
tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
snake_case_ : str = DataLoader(
tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : int = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
snake_case_ : List[Any] = 2
# Initialize accelerator
snake_case_ : Any = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Union[str, Any] = config["lr"]
snake_case_ : Optional[Any] = int(config["num_epochs"] )
snake_case_ : List[str] = int(config["seed"] )
snake_case_ : int = int(config["batch_size"] )
snake_case_ : Dict = evaluate.load("glue" ,"mrpc" )
# If the batch size is too big we use gradient accumulation
snake_case_ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : List[Any] = MAX_GPU_BATCH_SIZE
set_seed(__magic_name__ )
snake_case_, snake_case_ : Dict = get_dataloaders(__magic_name__ ,__magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Any = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Tuple = AdamW(params=model.parameters() ,lr=__magic_name__ )
# Instantiate scheduler
snake_case_ : List[str] = get_linear_schedule_with_warmup(
optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : Tuple = model(**__magic_name__ )
snake_case_ : str = outputs.loss
snake_case_ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case_ : Any = 0
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Dict = model(**__magic_name__ )
snake_case_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Any = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__magic_name__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
snake_case_ : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
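# A minimal sketch of that simpler alternative (requires a recent accelerate release):
# predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
# metric.add_batch(predictions=predictions, references=references)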
metric.add_batch(
predictions=__magic_name__ ,references=__magic_name__ ,)
snake_case_ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[int] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
snake_case_ : int = parser.parse_args()
snake_case_ : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
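# Worked example (illustration, not part of the solution): for integer = 3 the candidate is
# (3**2 - 1) / 4 = 2; then sqrt(4 * 2 + 1) / 2 + 1 / 2 = 2 and log2(2) = 1, an integer,
# so 2 counts as a perfect partition.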
| 653 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowerCamelCase : str = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : str = _TestCommandArgs(dataset=__magic_name__ ,all_configs=__magic_name__ ,save_infos=__magic_name__ )
snake_case_ : List[Any] = TestCommand(*__magic_name__ )
test_command.run()
snake_case_ : List[Any] = os.path.join(__magic_name__ ,"README.md" )
assert os.path.exists(__magic_name__ )
snake_case_ : Optional[Any] = DatasetInfosDict.from_directory(__magic_name__ )
snake_case_ : Any = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) ,splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] ,download_size=394_0680 ,dataset_size=258_9981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
snake_case_, snake_case_ : Optional[Any] = getattr(dataset_infos["default"] ,__magic_name__ ), getattr(expected_dataset_infos["default"] ,__magic_name__ )
if key == "num_bytes":
assert is_apercent_close(__magic_name__ ,__magic_name__ )
elif key == "splits":
assert list(__magic_name__ ) == list(__magic_name__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 653 |
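The `_TestCommandArgs` namedtuple above stands in for the `argparse.Namespace` that `TestCommand` would normally receive from the CLI; the `defaults=` argument makes every field after `dataset` optional. A minimal sketch of the same pattern (the `Args` tuple here is hypothetical, for illustration only):

from collections import namedtuple

# defaults bind right-to-left, so they cover save_infos and all_configs here
# while dataset stays required.
Args = namedtuple("Args", ["dataset", "save_infos", "all_configs"], defaults=[False, False])

args = Args(dataset="path/to/dataset", save_infos=True)
assert args.all_configs is False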
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
        snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Conv2d ) or isinstance(lowerCAmelCase__ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'''Transferred from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
        '''The name of the model you wish to convert; it must be one of the supported resnet* architectures,'''
        ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
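The `Tracker`/`ModuleTransfer` pair above works by registering a forward hook on every module, recording only the leaf modules that actually fire, then zipping the two traces to copy weights between architecturally identical models. A stripped-down sketch of that hook pattern (the toy `nn.Sequential` is illustrative only):

import torch
import torch.nn as nn

traced = []

def record(module, inputs, output):
    # keep only leaf modules (no submodules), as Tracker does
    if len(list(module.children())) == 0:
        traced.append(module)

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
handles = [m.register_forward_hook(record) for m in model.modules()]
model(torch.randn(1, 3, 32, 32))
for handle in handles:
    handle.remove()
# hooks fire in execution order: ['Conv2d', 'BatchNorm2d', 'ReLU']
print([type(m).__name__ for m in traced])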
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any]=14 , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Dict=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Tuple=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :Dict=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :List[Any]=None , ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : int = batch_size
snake_case_ : Optional[Any] = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Any = use_token_type_ids
snake_case_ : str = use_input_mask
snake_case_ : Optional[Any] = use_labels
snake_case_ : str = use_mc_token_ids
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Tuple = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : Optional[Any] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = num_labels
snake_case_ : int = num_choices
snake_case_ : Tuple = scope
snake_case_ : List[str] = self.vocab_size - 1
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_input_mask:
snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : int = None
if self.use_mc_token_ids:
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
snake_case_ : Optional[int] = None
snake_case_ : Dict = None
snake_case_ : int = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Union[str, Any] = self.get_config()
snake_case_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , *lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = CTRLModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , *lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = CTRLLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[str] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) : Dict = config_and_inputs
snake_case_ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , *lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Optional[Any] = CTRLForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
def _A ( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any ) -> Union[str, Any]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _A ( self :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Any = CTRLModelTester(self )
snake_case_ : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def _A ( self :Dict ) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :List[str] ) -> Dict:
'''simple docstring'''
pass
@slow
def _A ( self :Optional[int] ) -> int:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = CTRLModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(lowerCAmelCase__ )
snake_case_ : Dict = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=lowerCAmelCase__ ) # Legal the president is
snake_case_ : int = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
snake_case_ : Optional[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__ )
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''roc_bert'''
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[Any] = use_cache
snake_case_ : Optional[Any] = enable_pronunciation
snake_case_ : List[Any] = enable_shape
snake_case_ : Optional[int] = pronunciation_embed_dim
snake_case_ : Dict = pronunciation_vocab_size
snake_case_ : int = shape_embed_dim
snake_case_ : Any = shape_vocab_size
snake_case_ : Optional[int] = concat_input
snake_case_ : List[Any] = position_embedding_type
snake_case_ : Any = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 1 |
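A minimal usage sketch for the configuration above, assuming the standard `transformers` import path; the reduced sizes are illustrative, not checkpoint defaults. RoCBert augments the text vocabulary with separate pronunciation (pinyin) and shape (glyph) vocabularies, which is why the config carries three vocab sizes:

from transformers import RoCBertConfig

config = RoCBertConfig(
    vocab_size=1_000,               # text token vocabulary (illustrative)
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=2,
    pronunciation_vocab_size=910,   # pinyin-level vocabulary
    shape_vocab_size=24_858,        # glyph-level vocabulary
)
print(config.to_json_string())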
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Any=18 , lowerCAmelCase__ :Optional[int]=30 , lowerCAmelCase__ :int=400 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Dict=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Tuple=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Union[str, Any]=False , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = size if size is not None else {"height": 20, "width": 20}
snake_case_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
snake_case_ : Tuple = parent
snake_case_ : List[Any] = batch_size
snake_case_ : List[str] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Dict = min_resolution
snake_case_ : List[str] = max_resolution
snake_case_ : List[str] = do_resize
snake_case_ : Union[str, Any] = size
snake_case_ : List[str] = do_center_crop
snake_case_ : str = crop_size
snake_case_ : Dict = do_normalize
snake_case_ : Optional[int] = image_mean
snake_case_ : Union[str, Any] = image_std
snake_case_ : Any = do_reduce_labels
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( )-> str:
"""simple docstring"""
snake_case_ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
snake_case_ : Optional[Any] = Image.open(dataset[0]["file"] )
snake_case_ : Optional[Any] = Image.open(dataset[1]["file"] )
return image, map
def __UpperCAmelCase ( )-> Tuple:
"""simple docstring"""
snake_case_ : int = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
snake_case_ : Optional[int] = Image.open(ds[0]["file"] )
snake_case_ : List[str] = Image.open(ds[1]["file"] )
snake_case_ : List[Any] = Image.open(ds[2]["file"] )
snake_case_ : str = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = BeitImageProcessor if is_vision_available() else None
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = BeitImageProcessingTester(self )
@property
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase__ )
snake_case_ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
pass
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : List[str] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : int = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
snake_case_ : Any = []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
snake_case_ : Optional[int] = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_, snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : str = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_, snake_case_ : Optional[Any] = prepare_semantic_batch_inputs()
snake_case_ : int = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_, snake_case_ : Dict = prepare_semantic_single_inputs()
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
snake_case_ : Dict = True
snake_case_ : Any = image_processing(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 653 |
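The last test above exercises `do_reduce_labels`. What it relies on, sketched numerically below under the assumption that reduction remaps the ADE20K background class 0 to the ignore value 255 and shifts every other id down by one, so the 150 foreground classes land in [0, 149]:

import numpy as np

labels = np.array([[0, 1, 2], [150, 0, 3]])
reduced = labels.astype(np.int64) - 1   # shift every class id down by one
reduced[labels == 0] = 255              # background becomes the ignore index
print(reduced)  # [[255   0   1] [149 255   2]]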
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
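A usage sketch for the bottom-up variant above (the only function name the snippet exposes directly). Note that, despite the `area` naming, the returned value is the side length of the largest all-ones square, not its area:

mat = [
    [1, 0, 1, 1],
    [1, 1, 1, 1],
    [0, 1, 1, 1],
]
# The largest all-ones square spans rows 1-2, columns 2-3, so the side is 2
# (its area would be 4).
print(largest_square_area_in_matrix_bottom_up(len(mat), len(mat[0]), mat))  # 2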
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( __magic_name__ ,__magic_name__="shi-labs/oneformer_demo" )-> Dict:
"""simple docstring"""
with open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) as f:
snake_case_ : Any = json.load(__magic_name__ )
snake_case_ : Any = {}
snake_case_ : List[Any] = []
snake_case_ : List[Any] = []
for key, info in class_info.items():
snake_case_ : List[Any] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(__magic_name__ ) )
snake_case_ : Union[str, Any] = thing_ids
snake_case_ : Union[str, Any] = class_names
return metadata
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :str=7 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=30 , lowerCAmelCase__ :Any=400 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Dict=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :Tuple=255 , lowerCAmelCase__ :str="shi-labs/oneformer_demo" , lowerCAmelCase__ :str="ade20k_panoptic.json" , lowerCAmelCase__ :List[str]=10 , ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : str = num_channels
snake_case_ : Any = min_resolution
snake_case_ : Any = max_resolution
snake_case_ : List[Any] = do_resize
snake_case_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : Tuple = image_mean
snake_case_ : List[str] = image_std
snake_case_ : int = class_info_file
snake_case_ : Optional[int] = prepare_metadata(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = num_text
snake_case_ : Optional[int] = repo_path
# for the post_process_functions
snake_case_ : Tuple = 2
snake_case_ : Optional[Any] = 10
snake_case_ : Any = 10
snake_case_ : Optional[Any] = 3
snake_case_ : str = 4
snake_case_ : str = num_labels
snake_case_ : int = do_reduce_labels
snake_case_ : List[str] = ignore_index
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> Optional[int]:
'''simple docstring'''
if not batched:
snake_case_ : List[Any] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : List[str] = image.size
else:
snake_case_, snake_case_ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : Any = self.size["shortest_edge"]
elif w > h:
snake_case_ : Any = self.size["shortest_edge"]
snake_case_ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Any = self.size["shortest_edge"]
snake_case_ : List[Any] = self.size["shortest_edge"]
else:
snake_case_ : List[Any] = []
for image in image_inputs:
snake_case_, snake_case_ : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
def _A ( self :str ) -> Tuple:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a__ = image_processing_class
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = OneFormerImageProcessorTester(self )
@property
def _A ( self :List[str] ) -> int:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "ignore_index" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "class_info_file" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "num_text" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "repo_path" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "metadata" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_reduce_labels" ) )
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
pass
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Optional[Any] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : str = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Tuple = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Optional[int] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Union[str, Any] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Optional[int] , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Tuple="np" ) -> str:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
snake_case_ : List[str] = self.image_processing_tester.num_labels
snake_case_ : Union[str, Any] = None
snake_case_ : Dict = None
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
if with_segmentation_maps:
snake_case_ : str = num_labels
if is_instance_map:
snake_case_ : Tuple = list(range(lowerCAmelCase__ ) ) * 2
snake_case_ : Dict = dict(enumerate(lowerCAmelCase__ ) )
snake_case_ : str = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
snake_case_ : Union[str, Any] = [Image.fromarray(lowerCAmelCase__ ) for annotation in annotations]
snake_case_ : Union[str, Any] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , lowerCAmelCase__ , return_tensors="pt" , instance_id_to_semantic_id=lowerCAmelCase__ , pad_and_return_pixel_mask=lowerCAmelCase__ , )
return inputs
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
pass
def _A ( self :Any ) -> Dict:
'''simple docstring'''
def common(lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Union[str, Any]=None ):
snake_case_ : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCAmelCase__ , is_instance_map=lowerCAmelCase__ , segmentation_type=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = inputs["mask_labels"]
snake_case_ : str = inputs["class_labels"]
snake_case_ : Tuple = inputs["pixel_values"]
snake_case_ : List[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCAmelCase__ )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = np.zeros((20, 50) )
snake_case_ : str = 1
snake_case_ : Any = 1
snake_case_ : Optional[Any] = 1
snake_case_ : Dict = binary_mask_to_rle(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
snake_case_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
        snake_case_ : int = image_processor.post_process_semantic_segmentation(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
snake_case_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        snake_case_ : Optional[int] = image_processor.post_process_semantic_segmentation(lowerCAmelCase__ , target_sizes=lowerCAmelCase__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
snake_case_ : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case_ : str = image_processor.post_process_instance_segmentation(lowerCAmelCase__ , threshold=0 )
self.assertTrue(len(lowerCAmelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCAmelCase__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _A ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
snake_case_ : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
snake_case_ : str = image_processor.post_process_panoptic_segmentation(lowerCAmelCase__ , threshold=0 )
self.assertTrue(len(lowerCAmelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCAmelCase__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=7 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case_ : Dict = "636036"
snake_case_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case_ : Optional[Any] = requests.get(__magic_name__ ,headers=__magic_name__ ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = get_daily_ci_runs(__magic_name__ )
snake_case_ : Optional[int] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case_ : Dict = workflow_run["id"]
break
return workflow_run_id
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
snake_case_ : Union[str, Any] = get_artifacts_links(worflow_run_id=__magic_name__ ,token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case_ : Union[str, Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ ,artifact_url=__magic_name__ ,output_dir=__magic_name__ ,token=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = {}
for artifact_name in artifact_names:
snake_case_ : Any = os.path.join(__magic_name__ ,F'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
snake_case_ : Tuple = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
snake_case_ : Optional[Any] = f.read().decode("UTF-8" )
return results
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ = 10 ,__magic_name__ = 1000 ,__magic_name__ = True )-> int:
"""simple docstring"""
assert (
isinstance(__magic_name__ ,__magic_name__ )
and isinstance(__magic_name__ ,__magic_name__ )
and isinstance(__magic_name__ ,__magic_name__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return int((number_a + number_a) / 2 )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> None:
"""simple docstring"""
assert (
isinstance(__magic_name__ ,__magic_name__ ) and isinstance(__magic_name__ ,__magic_name__ ) and isinstance(__magic_name__ ,__magic_name__ )
    ), 'argument values must be of type "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(__magic_name__ ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
snake_case_ : int = lower
snake_case_ : Union[str, Any] = higher
snake_case_ : List[Any] = []
while True:
snake_case_ : str = get_avg(__magic_name__ ,__magic_name__ )
last_numbers.append(__magic_name__ )
if answer(__magic_name__ ) == "low":
snake_case_ : Union[str, Any] = number
elif answer(__magic_name__ ) == "high":
snake_case_ : Union[str, Any] = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : str = int(input("Enter lower value : " ).strip() )
snake_case_ : Dict = int(input("Enter high value : " ).strip() )
snake_case_ : Any = int(input("Enter value to guess : " ).strip() )
guess_the_number(__magic_name__ ,__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
| 653 |
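`guess_the_number` above is a textbook binary search over the [lower, higher] interval: it repeatedly probes the midpoint via `get_avg` and narrows whichever bound `answer` reports as too high or too low. A non-interactive usage sketch:

# Prints each midpoint probed, then the hit; to_guess must lie strictly
# between the bounds or the function raises ValueError.
guess_the_number(1, 1_000, 547)
# started...
# guess the number : 547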
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : List[str] = "THE GERMAN ATTACK"
snake_case_ : List[str] = "SECRET"
snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 653 | 1 |
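A round-trip sketch using the three helpers above: `generate_key` cycles the key out to the message length, `cipher_text` shifts each letter by the key (spaces pass through untouched), and `original_text` inverts the shift, so decrypting with the same generated key recovers the plaintext:

message = "ATTACK AT DAWN"
key = generate_key(message, "LEMON")
encrypted = cipher_text(message, key)
assert original_text(encrypted, key) == message
print(encrypted)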
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
snake_case_ : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : int = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
snake_case_ : List[Any] = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16_000,
"return_attention_mask": False,
"do_normalize": True,
}
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
# load decoder from hub
snake_case_ : str = "hf-internal-testing/ngram-beam-search-decoder"
def _A ( self :List[str] , **lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCAmelCase__ )
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Optional[Any] = self.get_feature_extractor()
snake_case_ : str = self.get_decoder()
snake_case_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
snake_case_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(lowerCAmelCase__ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.get_feature_extractor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Optional[Any] = self.get_decoder()
snake_case_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : Optional[Any] = floats_list((3, 1_000) )
snake_case_ : Tuple = feature_extractor(lowerCAmelCase__ , return_tensors="np" )
snake_case_ : List[str] = processor(lowerCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_feature_extractor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : List[str] = self.get_decoder()
snake_case_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : List[str] = "This is a test string"
snake_case_ : int = processor(text=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict=(2, 10, 16) , lowerCAmelCase__ :List[str]=77 ) -> Union[str, Any]:
'''simple docstring'''
np.random.seed(lowerCAmelCase__ )
return np.random.rand(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : int = self.get_feature_extractor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_decoder()
snake_case_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
snake_case_ : Union[str, Any] = processor.decode(lowerCAmelCase__ )
snake_case_ : Tuple = decoder.decode_beams(lowerCAmelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _A ( self :Tuple , lowerCAmelCase__ :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_feature_extractor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : List[str] = self.get_decoder()
snake_case_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case_ : int = processor.batch_decode(lowerCAmelCase__ )
else:
with get_context(lowerCAmelCase__ ).Pool() as pool:
snake_case_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[int] = list(lowerCAmelCase__ )
with get_context("fork" ).Pool() as p:
snake_case_ : List[str] = decoder.decode_beams_batch(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_ : List[str] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.lm_score )
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : int = self.get_feature_extractor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = self._get_dummy_logits()
snake_case_ : int = 15
snake_case_ : Optional[int] = -2_0.0
snake_case_ : Any = -4.0
snake_case_ : Any = processor.batch_decode(
lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
snake_case_ : int = decoded_processor_out.text
snake_case_ : Any = list(lowerCAmelCase__ )
with get_context("fork" ).Pool() as pool:
snake_case_ : Dict = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
snake_case_ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
snake_case_ : str = [d[0][2] for d in decoded_decoder_out]
snake_case_ : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , lowerCAmelCase__ )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , lowerCAmelCase__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowerCAmelCase__ , atol=1E-3 ) )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : List[str] = self.get_decoder()
snake_case_ : Any = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : int = self._get_dummy_logits()
snake_case_ : List[str] = 2.0
snake_case_ : Any = 5.0
snake_case_ : int = -2_0.0
snake_case_ : Dict = True
snake_case_ : Optional[int] = processor.batch_decode(
lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
snake_case_ : Union[str, Any] = decoded_processor_out.text
snake_case_ : List[str] = list(lowerCAmelCase__ )
decoder.reset_params(
alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
with get_context("fork" ).Pool() as pool:
snake_case_ : Union[str, Any] = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , )
snake_case_ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , lowerCAmelCase__ )
snake_case_ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : str = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : Tuple = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case_ : Optional[int] = os.listdir(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = snapshot_download("hf-internal-testing/processor_with_lm" )
snake_case_ : Any = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase__ )
snake_case_ : Dict = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : List[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
snake_case_ : Tuple = os.listdir(lowerCAmelCase__ )
snake_case_ : int = os.listdir(lowerCAmelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[Any] = floats_list((3, 1_000) )
snake_case_ : int = processor_wavaveca(lowerCAmelCase__ , return_tensors="np" )
snake_case_ : Union[str, Any] = processor_auto(lowerCAmelCase__ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
snake_case_ : List[str] = self._get_dummy_logits()
snake_case_ : List[Any] = processor_wavaveca.batch_decode(lowerCAmelCase__ )
snake_case_ : Dict = processor_auto.batch_decode(lowerCAmelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_feature_extractor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : List[Any] = self.get_decoder()
snake_case_ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def _A ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = [d[key] for d in offsets]
return retrieved_list
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[Any] = self._get_dummy_logits()[0]
snake_case_ : List[Any] = processor.decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _A ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
snake_case_ : List[Any] = self._get_dummy_logits()
snake_case_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(lowerCAmelCase__ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ : Optional[int] = load_dataset("common_voice" , "en" , split="train" , streaming=lowerCAmelCase__ )
snake_case_ : Tuple = ds.cast_column("audio" , datasets.Audio(sampling_rate=16_000 ) )
snake_case_ : str = iter(lowerCAmelCase__ )
snake_case_ : Dict = next(lowerCAmelCase__ )
snake_case_ : Dict = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
snake_case_ : Any = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
snake_case_ : List[Any] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
snake_case_ : int = model(lowerCAmelCase__ ).logits.cpu().numpy()
snake_case_ : Tuple = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
snake_case_ : List[str] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
snake_case_ : str = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase__ , "word" ) ) , lowerCAmelCase__ )
self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase__ , "word" ) ) , output.text )
# output times
snake_case_ : List[str] = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , "start_time" ) )
snake_case_ : Any = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , "end_time" ) )
# fmt: off
snake_case_ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
snake_case_ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.0_1 ) )
| 653 |
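# Distilled from the slow integration test above: the end-to-end decode path
# for a Wav2Vec2 model with an LM-boosted decoder (the dump's "WavaVeca" names
# stand for the real Wav2Vec2 classes). Checkpoint name and calls are the ones
# the test itself uses; the silent waveform is a placeholder, and the two
# from_pretrained calls download model weights.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

audio = np.zeros(16_000, dtype=np.float32)  # placeholder: 1 s of 16 kHz silence
input_values = processor(audio, return_tensors="pt").input_values
with torch.no_grad():
    logits = model(input_values).logits.cpu().numpy()
output = processor.decode(logits[0], output_word_offsets=True)
print(output.text, output.word_offsets)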
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
snake_case_ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
snake_case_ : str = model.state_dict()
def to_tf_var_name(__magic_name__ ):
for patt, repl in iter(__magic_name__ ):
snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
return F'''bert/{name}'''
def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__magic_name__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
snake_case_ : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
snake_case_ : List[Any] = torch_tensor.T
snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = session.run(__magic_name__ )
print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
snake_case_ : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
| 653 | 1 |
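# The conversion script above hinges on a simple rename table. A standalone
# sketch of just the name-translation step (patterns copied from the script;
# no TensorFlow needed to try it):
var_name_map = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    # apply each (pattern, replacement) pair in order, then prefix the scope
    for patt, repl in var_name_map:
        name = name.replace(patt, repl)
    return f"bert/{name}"

print(to_tf_var_name("encoder.layer.0.attention.self.query.weight"))
# -> bert/encoder/layer_0/attention/self/query/kernel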
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class A_ (datasets.BeamBasedBuilder ):
"""simple docstring"""
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCAmelCase__ , )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Union[str, Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def _A ( self :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ )
class A_ (datasets.BeamBasedBuilder ):
"""simple docstring"""
def _A ( self :Any ) -> Any:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCAmelCase__ , )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def _A ( self :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class A_ (a_ ):
"""simple docstring"""
@require_beam
def _A ( self :Optional[Any] ) -> Any:
'''simple docstring'''
snake_case_ : str = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Dict = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
snake_case_ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _A ( self :str ) -> Optional[int]:
'''simple docstring'''
import apache_beam as beam
snake_case_ : Optional[int] = beam.io.parquetio.WriteToParquet
snake_case_ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Tuple = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
snake_case_ : Union[str, Any] = partial(lowerCAmelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        lowerCAmelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
snake_case_ : Any = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Union[str, Any] = DummyBeamDataset(cache_dir=lowerCAmelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _A ( self :str ) -> str:
'''simple docstring'''
snake_case_ : Tuple = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Tuple = NestedBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
snake_case_ : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 653 |
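# The tests above exercise a Beam-backed builder end to end. The essential call
# sequence, pulled out of the test: DummyBeamDataset is the name the test
# instantiates (the class definitions above are obfuscated to A_), and the
# DirectRunner requires apache-beam to be installed.
import tempfile

with tempfile.TemporaryDirectory() as tmp_cache_dir:
    builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
    builder.download_and_prepare()
    dset = builder.as_dataset()
    print(dset["train"][0])  # {'content': 'foo'}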
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCAmelCase__ )
snake_case_ : Tuple = self.values[key]
def _A ( self :int ) -> Dict:
'''simple docstring'''
return (
sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
):
return key
return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
| 653 | 1 |
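# The subclass above resolves collisions by separate chaining: each slot holds
# a deque and new values are pushed on the left. A self-contained sketch of
# that collision strategy (illustrative names; no HashTable base class needed):
from collections import deque

class ChainedHashTable:
    def __init__(self, size: int = 8) -> None:
        self.size = size
        self.slots = [None] * size

    def insert(self, key: int, value) -> None:
        ind = hash(key) % self.size
        if self.slots[ind] is None:
            self.slots[ind] = deque()
        self.slots[ind].appendleft(value)  # newest value sits at the front

table = ChainedHashTable()
table.insert(1, "a")
table.insert(9, "b")   # 9 % 8 == 1, so this chains onto the same slot
print(table.slots[1])  # deque(['b', 'a'])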
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return number | (1 << position)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return number & ~(1 << position)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return number ^ (1 << position)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
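# The dump collapses every function above to the same name. Spelled out with
# descriptive labels inferred from the bodies (set_bit, clear_bit, flip_bit,
# is_bit_set, get_bit are my names), plus a few sanity checks:
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)

def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1

def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)

assert set_bit(0b1101, 1) == 0b1111
assert clear_bit(0b1111, 2) == 0b1011
assert flip_bit(0b1101, 1) == 0b1111
assert is_bit_set(0b1010, 3)
assert get_bit(0b1010, 0) == 0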
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 653 | 1 |
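# The map above is open addressing with linear probing; deletions leave the
# _deleted tombstone so later probes keep walking instead of stopping at None.
# A usage sketch, assuming the class is named HashMap (the name its __repr__
# reports; the dump renames it A_):
hm = HashMap()           # 8 buckets, resizes up past a 0.75 load factor
hm["a"] = 1
hm["b"] = 2
hm["a"] = 3              # same key: overwritten in place, length unchanged
del hm["b"]              # slot becomes the tombstone, not None
print(hm["a"], len(hm))  # 3 1
print(hm)                # HashMap(a: 3)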
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {'''vocab_file''': '''spiece.model'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__lowerCamelCase : Optional[Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__lowerCamelCase : int = 0
__lowerCamelCase : str = 1
__lowerCamelCase : Dict = 2
__lowerCamelCase : Tuple = 3
__lowerCamelCase : Optional[Any] = 4
class A_ (a_ ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = '''left'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Optional[Any]="<s>" , lowerCAmelCase__ :Union[str, Any]="</s>" , lowerCAmelCase__ :Optional[Any]="<unk>" , lowerCAmelCase__ :int="<sep>" , lowerCAmelCase__ :int="<pad>" , lowerCAmelCase__ :Dict="<cls>" , lowerCAmelCase__ :Optional[int]="<mask>" , lowerCAmelCase__ :Any=["<eop>", "<eod>"] , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :Tuple , ) -> None:
'''simple docstring'''
snake_case_ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
snake_case_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
snake_case_ : Dict = 3
snake_case_ : str = do_lower_case
snake_case_ : int = remove_space
snake_case_ : int = keep_accents
snake_case_ : List[str] = vocab_file
snake_case_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
@property
def _A ( self :int ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.__dict__.copy()
snake_case_ : int = None
return state
def __setstate__( self :int , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ : Optional[int] = {}
snake_case_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
if self.remove_space:
snake_case_ : Any = " ".join(inputs.strip().split() )
else:
snake_case_ : Any = inputs
snake_case_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
snake_case_ : List[str] = unicodedata.normalize("NFKD" , lowerCAmelCase__ )
snake_case_ : Tuple = "".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase__ )] )
if self.do_lower_case:
snake_case_ : Union[str, Any] = outputs.lower()
return outputs
def _A ( self :str , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.preprocess_text(lowerCAmelCase__ )
snake_case_ : str = self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
snake_case_ : List[Any] = []
for piece in pieces:
if len(lowerCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
snake_case_ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ : Union[str, Any] = cur_pieces[1:]
else:
snake_case_ : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase__ )
else:
new_pieces.append(lowerCAmelCase__ )
return new_pieces
def _A ( self :str , lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
return self.sp_model.PieceToId(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Tuple = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
return out_string
def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :str , ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.pop("use_source_tokenizer" , lowerCAmelCase__ )
snake_case_ : Tuple = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
snake_case_ : int = []
sub_texts.append(lowerCAmelCase__ )
else:
current_sub_text.append(lowerCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
snake_case_ : Optional[int] = "".join(lowerCAmelCase__ )
snake_case_ : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ : List[str] = self.clean_up_tokenization(lowerCAmelCase__ )
return clean_text
else:
return text
def _A ( self :str , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1]
return ([0] * len(lowerCAmelCase__ )) + [1, 1]
def _A ( self :Any , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Tuple = [self.sep_token_id]
snake_case_ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _A ( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
snake_case_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
| 653 |
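# The tokenizer's preprocess_text step above normalizes quotes and, unless
# keep_accents is set, strips accents via NFKD decomposition. The core of that
# transform in isolation (strip_accents is my label):
import unicodedata

def strip_accents(text: str) -> str:
    decomposed = unicodedata.normalize("NFKD", text)  # split base char + combining marks
    return "".join(c for c in decomposed if not unicodedata.combining(c))

print(strip_accents("Héllo café"))  # Hello cafe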
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_bigcode'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : str = activation_function
snake_case_ : Union[str, Any] = resid_pdrop
snake_case_ : Optional[Any] = embd_pdrop
snake_case_ : Any = attn_pdrop
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : int = scale_attn_weights
snake_case_ : Union[str, Any] = use_cache
snake_case_ : Dict = attention_softmax_in_fpaa
snake_case_ : Any = scale_attention_softmax_in_fpaa
snake_case_ : List[str] = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Any = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 1 |
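# The config above maps common names onto GPT-style fields via attribute_map,
# so hidden_size resolves to n_embd and num_hidden_layers to n_layer. A quick
# sketch, assuming the class ships as GPTBigCodeConfig (the dump renames it
# A_; model_type "gpt_bigcode" is the transformers name):
from transformers import GPTBigCodeConfig

cfg = GPTBigCodeConfig(n_embd=512, n_layer=6)
print(cfg.hidden_size, cfg.num_hidden_layers)  # 512 6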
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( __magic_name__ )-> List[str]:
"""simple docstring"""
for param in module.parameters():
snake_case_ : List[str] = False
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
snake_case_ : Optional[Any] = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Any = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def __UpperCAmelCase ( )-> str:
"""simple docstring"""
snake_case_ : Any = datetime.now()
snake_case_ : List[str] = current_time.strftime("%H:%M:%S" )
return timestamp
| 653 |
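# The first helper above loops over module.parameters() and sets requires_grad
# to False (the assignment target is garbled in the dump). A readable version
# with a quick check; freeze_params is my label:
import torch
from torch import nn

def freeze_params(module: nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False  # exclude from gradient computation

layer = nn.Linear(4, 2)
freeze_params(layer)
print(all(not p.requires_grad for p in layer.parameters()))  # True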
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
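# The GPU-init routine above derives the node topology from WORLD_SIZE, RANK
# and the GPUs per node. The same bookkeeping as a pure function, with names
# mirroring the fields the routine sets on `params` (a sketch, not the
# original API):
def derive_topology(world_size: int, global_rank: int, local_rank: int, n_gpu_per_node: int):
    n_nodes = world_size // n_gpu_per_node
    node_id = global_rank // n_gpu_per_node
    is_master = node_id == 0 and local_rank == 0
    multi_node = n_nodes > 1
    return n_nodes, node_id, is_master, multi_node

print(derive_topology(world_size=8, global_rank=5, local_rank=1, n_gpu_per_node=4))
# (2, 1, False, True)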
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Union[str, Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 653 |
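# Programmatic equivalent of the CLI above, calling the same diffusers helper
# it imports (a sketch: the checkpoint path is a placeholder, and only kwargs
# the script itself passes are used):
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="./v1-5-pruned-emaonly.ckpt",  # placeholder path
    scheduler_type="pndm",
    extract_ema=True,
)
pipe.save_pretrained("./sd-converted")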
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing with the shortest-edge rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
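# Worked example of the resize rule implemented above, assuming the default
# size of {"shortest_edge": 18, "longest_edge": 1333}: a 30 x 40 (w x h) image
# has w < h, so expected_width = 18 and expected_height = int(18 * 40 / 30) = 24.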
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        # URLs discovered while parsing, kept free of duplicates.
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags can carry hyperlinks.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the second-level and top-level domain of a URL, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (netloc) of a URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape a page, follow its links, and collect e-mail addresses for the domain."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
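# Sketch of the matching behaviour of the e-mail regex used above (the sample
# string is illustrative):
#
#   >>> re.findall("[a-zA-Z0-9]+@" + "github.com", "reach me at dev123@github.com")
#   ['dev123@github.com']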
| 653 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting from `number` up to `iterations` and return the result."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
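# A quick sanity check of the function above; note the trailing space appended
# after every entry by the implementation:
#
#   >>> fizz_buzz(1, 7)
#   '1 2 Fizz 4 Buzz Fizz 7 '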
| 653 | 1 |
'''simple docstring'''
from manim import *
class A_(Scene):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
snake_case_ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
snake_case_ : Union[str, Any] = [mem.copy() for i in range(6 )]
snake_case_ : Optional[int] = [mem.copy() for i in range(6 )]
snake_case_ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : List[Any] = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : int = Text("CPU" , font_size=24 )
snake_case_ : List[Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
snake_case_ : Any = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : str = Text("GPU" , font_size=24 )
snake_case_ : Optional[Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = [mem.copy() for i in range(6 )]
snake_case_ : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : str = Text("Model" , font_size=24 )
snake_case_ : List[str] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
snake_case_ : int = []
snake_case_ : str = []
for i, rect in enumerate(lowerCAmelCase__ ):
snake_case_ : int = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.8 )
target.move_to(lowerCAmelCase__ )
model_arr.append(lowerCAmelCase__ )
snake_case_ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
snake_case_ : str = [meta_mem.copy() for i in range(6 )]
snake_case_ : List[str] = [meta_mem.copy() for i in range(6 )]
snake_case_ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : Optional[int] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : Dict = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ : List[Any] = Text("Disk" , font_size=24 )
snake_case_ : List[str] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4, -1.2_5, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Dict = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
snake_case_ : List[str] = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) )
snake_case_ : List[str] = Square(0.3 )
input.set_fill(lowerCAmelCase__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCAmelCase__ , buff=0.5 )
self.play(Write(lowerCAmelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCAmelCase__ , buff=0.0_2 )
self.play(MoveToTarget(lowerCAmelCase__ ) )
self.play(FadeOut(lowerCAmelCase__ ) )
snake_case_ : Any = Arrow(start=lowerCAmelCase__ , end=lowerCAmelCase__ , color=lowerCAmelCase__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCAmelCase__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case_ : Optional[int] = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
snake_case_ : Any = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(lowerCAmelCase__ ) , Circumscribe(model_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_cpu_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case_ : Union[str, Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , lowerCAmelCase__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
snake_case_ : Union[str, Any] = AnimationGroup(
FadeOut(lowerCAmelCase__ , run_time=0.5 ) , MoveToTarget(lowerCAmelCase__ , run_time=0.5 ) , FadeIn(lowerCAmelCase__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCAmelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case_ : int = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_arr[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case_ : Optional[Any] = a_c
snake_case_ : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(lowerCAmelCase__ ) , FadeOut(lowerCAmelCase__ , run_time=0.5 ) , )
snake_case_ : str = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , MoveToTarget(lowerCAmelCase__ ) )
self.wait()
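# Rendering sketch: with manim installed, a Scene like the one above is
# rendered from the command line (the file name here is a placeholder):
#
#   manim -pql big_model_inference.py A_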
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the train/eval dataloaders for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train BERT on GLUE MRPC, retrying with smaller batch sizes on out-of-memory errors."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
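# A minimal, self-contained sketch of what `find_executable_batch_size` does,
# separate from the training script above. The failure condition is simulated
# here (a hypothetical threshold of 32); in real use, an actual CUDA
# out-of-memory error triggers the retry with a halved batch size.


@find_executable_batch_size(starting_batch_size=128)
def _toy_inner_loop(batch_size):
    # The decorator calls this with no arguments and injects `batch_size`,
    # halving it each time an out-of-memory style error is raised.
    if batch_size > 32:  # pretend anything above 32 exhausts memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size


# _toy_inner_loop()  # would try 128 -> 64 -> 32 and return 32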
| 653 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class A_ (unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
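# Why the assertions above can hold (sketch): with identical beta schedules the
# two schedulers share the same forward process, so for the same (x_0, noise, t)
# `add_noise` computes the same noisy sample in both cases:
#
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise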
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
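# A hypothetical usage sketch (the checkpoint downloads on first call; the
# output shown is illustrative):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> 'positive'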
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
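# Instantiation sketch: the defaults above mirror the ViT-MSN base
# architecture, and any field can be overridden by keyword (the overrides
# below are illustrative):
#
#   config = ViTMSNConfig(image_size=384, patch_size=32)
#   config.num_hidden_layers  # 12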
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
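# Effect of the lazy module above (sketch): submodules are imported only when
# an attribute is first accessed, so e.g.
#
#   from transformers.models.vit import ViTConfig
#
# resolves through `_LazyModule.__getattr__` without importing the torch, TF
# or flax modeling files until they are actually needed.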
| 653 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of PIL images from random channels-first numpy arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : List[str] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case_ : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
snake_case_ : int = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase__ )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_qformer_tokenizer()
snake_case_ : Optional[Any] = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : Optional[Any] = image_processor(lowerCAmelCase__ , return_tensors="np" )
snake_case_ : int = processor(images=lowerCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : str = self.get_qformer_tokenizer()
snake_case_ : List[str] = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
snake_case_ : Optional[Any] = "lower newer"
snake_case_ : List[str] = processor(text=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = qformer_tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : List[str] = self.get_qformer_tokenizer()
snake_case_ : str = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = "lower newer"
snake_case_ : Tuple = self.prepare_image_inputs()
snake_case_ : Optional[Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : Optional[Any] = self.get_qformer_tokenizer()
snake_case_ : Dict = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
snake_case_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : str = processor.batch_decode(lowerCAmelCase__ )
snake_case_ : int = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_qformer_tokenizer()
snake_case_ : Any = InstructBlipProcessor(
tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , qformer_tokenizer=lowerCAmelCase__ )
snake_case_ : List[str] = "lower newer"
snake_case_ : str = self.prepare_image_inputs()
snake_case_ : Any = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
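# End-to-end usage sketch for the processor exercised above (the checkpoint
# name is illustrative and downloads on first use):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # inputs carries input_ids/attention_mask, qformer_* ids and pixel_values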
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([[1, 2]] )
snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ : Any = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
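# Added note (comments only, not in the original test): the sequence length of 199
# is 2 text tokens + 197 visual tokens -- a 224x224 image at patch size 16 yields
# 14 * 14 = 196 patches plus one CLS token.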
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''beit'''
def __init__( self :int , lowerCAmelCase__ :Dict=8_192 , lowerCAmelCase__ :Tuple=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :List[Any]=12 , lowerCAmelCase__ :str=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.0 , lowerCAmelCase__ :Any=0.0 , lowerCAmelCase__ :Dict=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :Union[str, Any]=224 , lowerCAmelCase__ :int=16 , lowerCAmelCase__ :List[str]=3 , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Any=[3, 5, 7, 11] , lowerCAmelCase__ :Any=[1, 2, 3, 6] , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Tuple=0.4 , lowerCAmelCase__ :List[str]=256 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=255 , **lowerCAmelCase__ :str , ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : List[str] = vocab_size
snake_case_ : List[Any] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : List[Any] = initializer_range
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = image_size
snake_case_ : List[str] = patch_size
snake_case_ : Tuple = num_channels
snake_case_ : List[str] = use_mask_token
snake_case_ : Optional[int] = use_absolute_position_embeddings
snake_case_ : Any = use_relative_position_bias
snake_case_ : Tuple = use_shared_relative_position_bias
snake_case_ : int = layer_scale_init_value
snake_case_ : Optional[int] = drop_path_rate
snake_case_ : Any = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ : int = out_indices
snake_case_ : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ : Tuple = use_auxiliary_head
snake_case_ : Tuple = auxiliary_loss_weight
snake_case_ : str = auxiliary_channels
snake_case_ : str = auxiliary_num_convs
snake_case_ : Tuple = auxiliary_concat_input
snake_case_ : Optional[Any] = semantic_loss_ignore_index
class A_ (a_ ):
"""simple docstring"""
a__ = version.parse('''1.11''' )
@property
def _A ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _A ( self :Dict ) -> float:
'''simple docstring'''
return 1E-4
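# Added note (comments only): the export config above declares a single ONNX input,
# pixel_values, with dynamic batch/num_channels/height/width axes, and a 1e-4
# absolute tolerance for validating the exported graph against the original model.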
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
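# Illustrative sketch (added comment, not part of the original test): map_nested
# recurses through nested lists/dicts and applies the function to every leaf, which
# is what the five asserts above check pairwise, e.g.
# map_nested(fn, {"a": [1, 2], "b": 3}) -> {"a": [2, 3], "b": 4}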
| 653 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = tempfile.mkdtemp()
snake_case_ : Dict = BlipImageProcessor()
snake_case_ : Union[str, Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
snake_case_ : Any = BlipProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _A ( self :Any , **lowerCAmelCase__ :List[str] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).tokenizer
def _A ( self :Any , **lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _A ( self :Optional[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case_ : str = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case_ : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
snake_case_ : List[str] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_image_processor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : List[str] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : Optional[int] = self.prepare_image_inputs()
snake_case_ : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="np" )
snake_case_ : int = processor(images=lowerCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self :Union[str, Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : Tuple = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : Optional[int] = "lower newer"
snake_case_ : Tuple = processor(text=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self :str ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_image_processor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : List[str] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : int = "lower newer"
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : str = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : Any = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ )
snake_case_ : str = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.get_image_processor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Optional[int] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : Optional[int] = "lower newer"
snake_case_ : Tuple = self.prepare_image_inputs()
snake_case_ : int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
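# Added note (comments only): as the tests above exercise, BlipProcessor fans text
# out to the tokenizer and images out to the image processor, then merges the two
# feature dicts -- hence the combined call exposes exactly pixel_values, input_ids
# and attention_mask.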
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''esm'''
def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : str = emb_layer_norm_before
snake_case_ : List[Any] = token_dropout
snake_case_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
snake_case_ : Optional[Any] = EsmFoldConfig()
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
snake_case_ : List[str] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : List[Any] = None
snake_case_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase__ ):
snake_case_ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = None
a__ = True
a__ = False
a__ = False
a__ = False
a__ = 0
a__ = True
a__ = False
a__ = 128
a__ = None
def _A ( self :Dict ) -> int:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase__ ):
snake_case_ : int = TrunkConfig(**self.trunk )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = asdict(self )
snake_case_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 48
a__ = 1024
a__ = 128
a__ = 32
a__ = 32
a__ = 32
a__ = 0
a__ = 0
a__ = False
a__ = 4
a__ = 128
a__ = None
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase__ ):
snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : Dict = self.structure_module.to_dict()
return output
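# Worked example of the checks above (comment only, added for illustration): with
# the defaults sequence_state_dim=1024, sequence_head_width=32, pairwise_state_dim=128
# and pairwise_head_width=32, the trunk uses 1024 // 32 = 32 sequence heads and
# 128 // 32 = 4 pairwise heads, so both equality checks pass.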
@dataclass
class A_ :
"""simple docstring"""
a__ = 384
a__ = 128
a__ = 16
a__ = 128
a__ = 12
a__ = 4
a__ = 8
a__ = 0.1
a__ = 8
a__ = 1
a__ = 2
a__ = 7
a__ = 10
a__ = 1E-8
a__ = 1E5
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return asdict(self )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
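# Sanity sketch (added for illustration, not in the original file): the default
# vocabulary returned above has 33 entries, matching the usual vocab_size of ESM-2
# checkpoints.
if __name__ == "__main__":
assert len(__UpperCAmelCase() ) == 33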
| 653 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A_ (a_ ):
"""simple docstring"""
a__ = None
a__ = None
a__ = None
a__ = None
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Optional[int] , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Union[str, Any]=0 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :Any="cls" , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :Tuple , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Optional[int] = project_dim
snake_case_ : str = pooler_fn
snake_case_ : Union[str, Any] = learn_encoder
snake_case_ : Optional[Any] = use_attention_mask
class A_ (a_ ):
"""simple docstring"""
a__ = [r'''pooler''', r'''logit_scale''']
a__ = [r'''position_ids''', r'''predictions.decoder.bias''']
a__ = '''roberta'''
a__ = RobertaSeriesConfig
def __init__( self :Any , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
snake_case_ : Optional[int] = XLMRobertaModel(lowerCAmelCase__ )
snake_case_ : Optional[Any] = nn.Linear(config.hidden_size , config.project_dim )
snake_case_ : Optional[int] = getattr(lowerCAmelCase__ , "has_pre_transformation" , lowerCAmelCase__ )
if self.has_pre_transformation:
snake_case_ : int = nn.Linear(config.hidden_size , config.project_dim )
snake_case_ : Any = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , ) -> str:
'''simple docstring'''
snake_case_ : int = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Union[str, Any] = self.base_model(
input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=lowerCAmelCase__ , )
if self.has_pre_transformation:
snake_case_ : Union[str, Any] = outputs["hidden_states"][-2]
snake_case_ : Tuple = self.pre_LN(lowerCAmelCase__ )
snake_case_ : Tuple = self.transformation_pre(lowerCAmelCase__ )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
snake_case_ : str = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
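# Added note (comments only): when has_pre_transformation is set, the projection is
# computed from the second-to-last hidden state (after an extra LayerNorm) rather
# than from the final hidden state, which the else branch projects directly.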
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
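# Added note (comments only): with the _LazyModule registered above, importing this
# package is cheap -- heavy submodules such as modeling_longformer are only loaded on
# first attribute access, e.g. when LongformerModel is actually referenced.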
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCamelCase : int = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = d_model
snake_case_ : Dict = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[Any] = prediction_length
snake_case_ : str = context_length
snake_case_ : Tuple = cardinality
snake_case_ : List[str] = num_time_features
snake_case_ : Optional[Any] = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : Optional[Any] = is_training
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = context_length
snake_case_ : Any = prediction_length + label_length
snake_case_ : Union[str, Any] = label_length
snake_case_ : List[Any] = moving_average
snake_case_ : str = autocorrelation_factor
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = config.context_length + max(config.lags_sequence )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : int = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_config()
snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
snake_case_ : Any = outputs.encoder_last_hidden_state
snake_case_ : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[Any] = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case_ : List[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
snake_case_ : Any = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case_ : List[str] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case_ : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case_ : Any = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_ : Tuple = decoder(
trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
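# Added note (comments only): the check above mirrors Autoformer's decomposition --
# the decoder's seasonal stream is seeded with the label-length tail of the seasonal
# component padded with zeros, and its trend stream with the trend tail padded by
# the per-series context mean.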
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = AutoformerModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _A ( self :str ) -> str:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Dict = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
snake_case_ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : Any = True
snake_case_ : Any = False
snake_case_ : Dict = True
snake_case_ : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Optional[int] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : str = outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case_ : Tuple = len(lowerCAmelCase__ )
snake_case_ : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# decoder attentions
snake_case_ : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = True
snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
"""simple docstring"""
snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ )
return batch
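# Usage note (comments only): the helper above pulls a cached batch from the Hub and
# deserializes it with torch.load; the integration tests below index it directly for
# keys such as "past_values" and "past_time_features".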
@require_torch
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : List[str] = prepare_batch()
with torch.no_grad():
snake_case_ : int = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
snake_case_ : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Optional[Any] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Tuple = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : str = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Optional[Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize ,sigma ,theta ,lambd ,psi ,gamma )-> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
ksize = ksize + 1
gabor = np.zeros((ksize, ksize) ,dtype=np.float64 )
# each value
for y in range(ksize ):
for x in range(ksize ):
# distance from center
px = x - ksize // 2
py = y - ksize // 2
# degree to radiant
_theta = theta / 180 * np.pi
cos_theta = np.cos(_theta )
sin_theta = np.sin(_theta )
# get kernel x
_x = cos_theta * px + sin_theta * py
# get kernel y
_y = -sin_theta * px + cos_theta * py
# fill kernel
gabor[y, x] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_10)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : int = {"unk_token": "<unk>"}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = "A, <mask> AllenNLP sentence."
snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _A ( self :int ) -> Tuple:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
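            # Editorial summary, not part of the original test: for the input
            # " hello hello" with a leading space, trim_offsets=True reports the
            # first token's offsets starting after the space, (1, 1 + len("hello")),
            # while trim_offsets=False keeps the space inside the span,
            # (0, 1 + len("hello")); in the cases above, add_prefix_space changes
            # the tokenization but not the reported offsets.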
| 653 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : str = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
snake_case_ : Union[str, Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw ).convert("RGB" )
return image
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : str = dct.pop(__magic_name__ )
snake_case_ : Dict = val
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case_ : str = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
snake_case_ : Union[str, Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
snake_case_ : int = torch.cat((q_bias, torch.zeros_like(__magic_name__ ,requires_grad=__magic_name__ ), v_bias) )
snake_case_ : Optional[Any] = qkv_bias
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Tuple = 364 if "coco" in model_name else 224
snake_case_ : Any = BlipaVisionConfig(image_size=__magic_name__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
snake_case_ : Any = OPTConfig.from_pretrained("facebook/opt-2.7b" ,eos_token_id=__magic_name__ ).to_dict()
elif "opt-6.7b" in model_name:
snake_case_ : Tuple = OPTConfig.from_pretrained("facebook/opt-6.7b" ,eos_token_id=__magic_name__ ).to_dict()
elif "t5-xl" in model_name:
snake_case_ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case_ : Any = TaConfig.from_pretrained("google/flan-t5-xxl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
snake_case_ : str = BlipaConfig(vision_config=__magic_name__ ,text_config=__magic_name__ )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=None ,__magic_name__=False )-> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
snake_case_ : List[Any] = tokenizer("\n" ,add_special_tokens=__magic_name__ ).input_ids[0]
snake_case_, snake_case_ : Optional[Any] = get_blipa_config(__magic_name__ ,eos_token_id=__magic_name__ )
snake_case_ : Tuple = BlipaForConditionalGeneration(__magic_name__ ).eval()
snake_case_ : Any = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
snake_case_, snake_case_ : Dict = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
snake_case_ : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
snake_case_, snake_case_, snake_case_ : Any = load_model_and_preprocess(
name=__magic_name__ ,model_type=__magic_name__ ,is_eval=__magic_name__ ,device=__magic_name__ )
original_model.eval()
print("Done!" )
# update state dict keys
snake_case_ : Tuple = original_model.state_dict()
snake_case_ : Optional[int] = create_rename_keys(__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ : Optional[int] = state_dict.pop(__magic_name__ )
if key.startswith("Qformer.bert" ):
snake_case_ : List[Any] = key.replace("Qformer.bert" ,"qformer" )
if "attention.self" in key:
snake_case_ : List[str] = key.replace("self" ,"attention" )
if "opt_proj" in key:
snake_case_ : List[Any] = key.replace("opt_proj" ,"language_projection" )
if "t5_proj" in key:
snake_case_ : Any = key.replace("t5_proj" ,"language_projection" )
if key.startswith("opt" ):
snake_case_ : List[Any] = key.replace("opt" ,"language" )
if key.startswith("t5" ):
snake_case_ : List[str] = key.replace("t5" ,"language" )
snake_case_ : Union[str, Any] = val
# read in qv biases
read_in_q_v_bias(__magic_name__ ,__magic_name__ )
snake_case_, snake_case_ : Optional[Any] = hf_model.load_state_dict(__magic_name__ ,strict=__magic_name__ )
assert len(__magic_name__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case_ : List[str] = load_demo_image()
snake_case_ : int = vis_processors["eval"](__magic_name__ ).unsqueeze(0 ).to(__magic_name__ )
snake_case_ : Tuple = tokenizer(["\n"] ,return_tensors="pt" ).input_ids.to(__magic_name__ )
# create processor
snake_case_ : List[Any] = BlipImageProcessor(
size={"height": image_size, "width": image_size} ,image_mean=__magic_name__ ,image_std=__magic_name__ )
snake_case_ : Dict = BlipaProcessor(image_processor=__magic_name__ ,tokenizer=__magic_name__ )
snake_case_ : Optional[Any] = processor(images=__magic_name__ ,return_tensors="pt" ).pixel_values.to(__magic_name__ )
# make sure processor creates exact same pixel values
assert torch.allclose(__magic_name__ ,__magic_name__ )
original_model.to(__magic_name__ )
hf_model.to(__magic_name__ )
with torch.no_grad():
if "opt" in model_name:
snake_case_ : Union[str, Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
snake_case_ : List[str] = hf_model(__magic_name__ ,__magic_name__ ).logits
else:
snake_case_ : Optional[int] = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
snake_case_ : str = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-100 )
snake_case_ : Optional[Any] = hf_model(__magic_name__ ,__magic_name__ ,labels=__magic_name__ ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" ,original_logits[0, :3, :3] )
print("First values of HF logits:" ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case_ : str = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] ,device=__magic_name__ )
assert torch.allclose(logits[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        snake_case_ : List[str] = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] ,device=__magic_name__ )
        assert torch.allclose(logits[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
else:
# cast to same type
snake_case_ : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(__magic_name__ ) ,__magic_name__ ,atol=1E-2 )
print("Looks ok!" )
print("Generating a caption..." )
snake_case_ : List[str] = ""
snake_case_ : Optional[int] = tokenizer(__magic_name__ ,return_tensors="pt" ).input_ids.to(__magic_name__ )
snake_case_ : str = original_model.generate({"image": original_pixel_values} )
snake_case_ : str = hf_model.generate(
__magic_name__ ,__magic_name__ ,do_sample=__magic_name__ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print("Original generation:" ,__magic_name__ )
snake_case_ : Any = input_ids.shape[1]
snake_case_ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=__magic_name__ )
snake_case_ : int = [text.strip() for text in output_text]
print("HF generation:" ,__magic_name__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__magic_name__ )
hf_model.save_pretrained(__magic_name__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
__lowerCamelCase : List[str] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__lowerCamelCase : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
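    # Example invocation (editorial note; the script name and paths are
    # hypothetical):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub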
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
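    # Editorial note on the mathematics above, not part of the original solution:
    # for odd k, (k**2 - 1) / 4 = ((k - 1) / 2) * ((k + 1) / 2) is an integer, so
    # every odd k >= 3 produces a partition candidate. check_partition_perfect(p)
    # inverts p = 2**e * (2**e - 1): from e = log2(sqrt(4p + 1) / 2 + 1 / 2) one
    # gets sqrt(4p + 1) = 2**(e + 1) - 1, hence p = 2**e * (2**e - 1), so the
    # exponent is an integer exactly for partitions of that form.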
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[int]]:
"""simple docstring"""
snake_case_ : list[list[int]] = []
create_all_state(1 ,__magic_name__ ,__magic_name__ ,[] ,__magic_name__ )
return result
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)-> None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__magic_name__ ,total_number - level + 2 ):
current_list.append(__magic_name__ )
create_all_state(i + 1 ,__magic_name__ ,level - 1 ,__magic_name__ ,__magic_name__ )
current_list.pop()
def __UpperCAmelCase ( __magic_name__ )-> None:
"""simple docstring"""
for i in total_list:
print(*__magic_name__ )
if __name__ == "__main__":
__lowerCamelCase : List[str] = 4
__lowerCamelCase : Dict = 2
__lowerCamelCase : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
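    # Expected output for n = 4, k = 2 (editorial note):
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4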
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'''Transferred from={src_m} to={dest_m}''' )
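# Editorial sketch, not part of the original script: the same forward-hook
# tracing idea as the `Tracker` dataclass above, reduced to a standalone helper.
# The name `trace_leaf_modules` is made up for illustration; it relies only on
# the `nn` and `Tensor` imports already present in this file.
def trace_leaf_modules(model: nn.Module, x: Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module, e.g. nn.Conv2d
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)  # hooks fire during the forward pass, so `traced` keeps execution order
    for handle in handles:
        handle.remove()
    return traced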
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architectures,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''efficientformer'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[int] = [3, 2, 6, 4] , lowerCAmelCase__ :List[int] = [48, 96, 224, 448] , lowerCAmelCase__ :List[bool] = [True, True, True, True] , lowerCAmelCase__ :int = 448 , lowerCAmelCase__ :int = 32 , lowerCAmelCase__ :int = 4 , lowerCAmelCase__ :int = 7 , lowerCAmelCase__ :int = 5 , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :int = 4 , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :int = 16 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :float = 1E-5 , lowerCAmelCase__ :str = "gelu" , lowerCAmelCase__ :float = 0.0_2 , lowerCAmelCase__ :float = 1E-1_2 , lowerCAmelCase__ :int = 224 , lowerCAmelCase__ :float = 1E-0_5 , **lowerCAmelCase__ :Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : List[Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Optional[Any] = hidden_sizes
snake_case_ : int = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Optional[Any] = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Dict = depths
snake_case_ : int = mlp_expansion_ratio
snake_case_ : str = downsamples
snake_case_ : Optional[int] = dim
snake_case_ : List[Any] = key_dim
snake_case_ : Tuple = attention_ratio
snake_case_ : Any = resolution
snake_case_ : Union[str, Any] = pool_size
snake_case_ : str = downsample_patch_size
snake_case_ : Dict = downsample_stride
snake_case_ : str = downsample_pad
snake_case_ : Tuple = drop_path_rate
snake_case_ : Optional[int] = num_metaad_blocks
snake_case_ : int = distillation
snake_case_ : Union[str, Any] = use_layer_scale
snake_case_ : str = layer_scale_init_value
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = batch_norm_eps
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''roc_bert'''
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[Any] = use_cache
snake_case_ : Optional[Any] = enable_pronunciation
snake_case_ : List[Any] = enable_shape
snake_case_ : Optional[int] = pronunciation_embed_dim
snake_case_ : Dict = pronunciation_vocab_size
snake_case_ : int = shape_embed_dim
snake_case_ : Any = shape_vocab_size
snake_case_ : Optional[int] = concat_input
snake_case_ : List[Any] = position_embedding_type
snake_case_ : Any = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str=13 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :str=32 , lowerCAmelCase__ :str=5 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Dict=0.0_2 , lowerCAmelCase__ :Tuple=0.9 , lowerCAmelCase__ :Tuple=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = parent
snake_case_ : Dict = batch_size
snake_case_ : Tuple = image_size
snake_case_ : List[str] = num_channels
snake_case_ : str = patch_size
snake_case_ : int = tubelet_size
snake_case_ : Any = num_frames
snake_case_ : Optional[Any] = is_training
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : str = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : int = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : List[str] = mask_ratio
snake_case_ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
snake_case_ : Optional[int] = (image_size // patch_size) ** 2
snake_case_ : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
snake_case_ : int = int(mask_ratio * self.seq_length )
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[int] = None
if self.use_labels:
snake_case_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _A ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = VideoMAEModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = VideoMAEForPreTraining(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case_ : Any = torch.ones((self.num_masks,) )
snake_case_ : Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case_ : int = mask.expand(self.batch_size , -1 ).bool()
snake_case_ : Union[str, Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
# model only returns predictions for masked patches
snake_case_ : Dict = mask.sum().item()
snake_case_ : List[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
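    # Editorial note, not part of the original test: with illustrative numbers
    # seq_length = 8 and num_masks = 3, the mask construction above is
    # equivalent to
    #   torch.cat([torch.ones(3), torch.zeros(5)]).expand(batch_size, -1).bool()
    # i.e. the same patch positions are masked for every video in the batch,
    # matching the requirement stated in the comment above.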
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ : List[Any] = config_and_inputs
snake_case_ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a__ = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = VideoMAEModelTester(self )
snake_case_ : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str=False ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = copy.deepcopy(lowerCAmelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case_ : Dict = torch.ones((self.model_tester.num_masks,) )
snake_case_ : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case_ : Optional[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case_ : Dict = bool_masked_pos.to(lowerCAmelCase__ )
if return_labels:
if model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def _A ( self :str ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :Any ) -> Optional[int]:
'''simple docstring'''
snake_case_, snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def _A ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
snake_case_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[Any] = [*signature.parameters.keys()]
snake_case_ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
@slow
def _A ( self :int ) -> List[str]:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = VideoMAEModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = True
for model_class in self.all_model_classes:
snake_case_ : Any = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Dict = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case_ : Optional[Any] = True
snake_case_ : List[str] = False
snake_case_ : str = True
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Dict = True
snake_case_ : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case_ : Any = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
snake_case_ : str = True
snake_case_ : List[str] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
snake_case_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _A ( self :int ) -> int:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ):
snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Optional[int] = outputs.hidden_states
snake_case_ : Tuple = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
snake_case_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Optional[int] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
pass
def __UpperCAmelCase ( )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" ,filename="eating_spaghetti.npy" ,repo_type="dataset" )
snake_case_ : int = np.load(__magic_name__ )
return list(__magic_name__ )
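# Editorial note, not part of the original test: the .npy file holds a stack of
# video frames; np.load returns one array of frames and list() splits it along
# the first axis into per-frame arrays for the image processor.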
@require_torch
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Tuple ) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : str = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Union[str, Any] = prepare_video()
snake_case_ : List[str] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case_ : Tuple = model(**lowerCAmelCase__ )
# verify the logits
snake_case_ : Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
snake_case_ : Tuple = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(lowerCAmelCase__ )
snake_case_ : Tuple = self.default_image_processor
snake_case_ : Dict = prepare_video()
snake_case_ : Union[str, Any] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# add boolean mask, indicating which patches to mask
snake_case_ : Optional[int] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
snake_case_ : Dict = torch.load(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case_ : Any = model(**lowerCAmelCase__ )
# verify the logits
snake_case_ : str = torch.Size([1, 1_408, 1_536] )
snake_case_ : List[Any] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase__ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case_ : Any = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case_ : Tuple = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowerCAmelCase__ ).to(
lowerCAmelCase__ )
with torch.no_grad():
snake_case_ : int = model(**lowerCAmelCase__ )
        snake_case_ : List[Any] = torch.tensor([0.6_4_6_9] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4 ) )
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
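    # Editorial complexity note, not part of the original module: the plain
    # recursive version recomputes overlapping subproblems and is exponential in
    # the grid size; the memoised version and the full-table bottom-up version
    # both take O(rows * cols) time and O(rows * cols) space; the two-row variant
    # defined last keeps O(rows * cols) time with only O(cols) extra space.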
| 653 | 1 |
'''simple docstring'''
import math
import sys
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[Any] = ""
try:
with open(__magic_name__ ,"rb" ) as binary_file:
snake_case_ : str = binary_file.read()
for dat in data:
snake_case_ : List[Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = {"0": "0", "1": "1"}
snake_case_, snake_case_ : Union[str, Any] = "", ""
snake_case_ : List[Any] = len(__magic_name__ )
for i in range(len(__magic_name__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
snake_case_ : Optional[Any] = lexicon[curr_string]
result += last_match_id
snake_case_ : int = last_match_id + "0"
if math.loga(__magic_name__ ).is_integer():
snake_case_ : str = {}
for curr_key in list(__magic_name__ ):
snake_case_ : Union[str, Any] = lexicon.pop(__magic_name__ )
snake_case_ : Optional[int] = new_lex
snake_case_ : Tuple = last_match_id + "1"
index += 1
snake_case_ : Any = ""
return result
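# Editorial note on the lexicon-doubling branch above, not part of the original
# module: the compressor emits codes with just enough bits for the number of
# lexicon entries so far, so whenever `index` reaches a power of two the code
# width grows by one bit and every existing key must be re-keyed with a leading
# "0" to match; that is what the rebuild inside the `is_integer()` branch does.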
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> None:
"""simple docstring"""
snake_case_ : Any = 8
try:
with open(__magic_name__ ,"wb" ) as opened_file:
snake_case_ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 ,len(__magic_name__ ) ,__magic_name__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__magic_name__ ,2 ).to_bytes(1 ,byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
snake_case_ : List[Any] = data_bits[counter:]
snake_case_ : List[str] = data_bits[counter + 1 :]
return data_bits
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> None:
"""simple docstring"""
snake_case_ : Optional[Any] = read_file_binary(__magic_name__ )
snake_case_ : Dict = remove_prefix(__magic_name__ )
snake_case_ : Union[str, Any] = decompress_data(__magic_name__ )
write_file_binary(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=7 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case_ : Dict = "636036"
snake_case_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case_ : Optional[Any] = requests.get(__magic_name__ ,headers=__magic_name__ ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = get_daily_ci_runs(__magic_name__ )
snake_case_ : Optional[int] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case_ : Dict = workflow_run["id"]
break
return workflow_run_id
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
snake_case_ : Union[str, Any] = get_artifacts_links(worflow_run_id=__magic_name__ ,token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case_ : Union[str, Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ ,artifact_url=__magic_name__ ,output_dir=__magic_name__ ,token=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = {}
for artifact_name in artifact_names:
snake_case_ : Any = os.path.join(__magic_name__ ,F'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
snake_case_ : Tuple = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
snake_case_ : Optional[Any] = f.read().decode("UTF-8" )
return results
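# Illustrative usage sketch, editorial and not part of the original module; the
# helper name, artifact name, and token lookup are hypothetical:
#
#   import os
#   results = get_last_daily_ci_reports(
#       artifact_names=["ci_results"],
#       output_dir="daily_ci",
#       token=os.environ["GITHUB_TOKEN"],
#   )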
| 653 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''esm'''
def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : str = emb_layer_norm_before
snake_case_ : List[Any] = token_dropout
snake_case_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
snake_case_ : Optional[Any] = EsmFoldConfig()
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
snake_case_ : List[str] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : List[Any] = None
snake_case_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase__ ):
snake_case_ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = None
a__ = True
a__ = False
a__ = False
a__ = False
a__ = 0
a__ = True
a__ = False
a__ = 128
a__ = None
def _A ( self :Dict ) -> int:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase__ ):
snake_case_ : int = TrunkConfig(**self.trunk )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = asdict(self )
snake_case_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 48
a__ = 1024
a__ = 128
a__ = 32
a__ = 32
a__ = 32
a__ = 0
a__ = 0
a__ = False
a__ = 4
a__ = 128
a__ = None
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase__ ):
snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : Dict = self.structure_module.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 384
a__ = 128
a__ = 16
a__ = 128
a__ = 12
a__ = 4
a__ = 8
a__ = 0.1
a__ = 8
a__ = 1
a__ = 2
a__ = 7
a__ = 10
a__ = 1E-8
a__ = 1E5
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return asdict(self )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
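The TrunkConfig validation above boils down to one invariant per attention track: the state dimension must factor exactly into num_heads * head_width. A minimal self-contained sketch of that check (function and argument names are assumptions read off the error messages; the defaults above give 1024 / 32 = 32 sequence heads):

def check_attention_dims(state_dim: int, head_width: int) -> int:
    """Return the implied head count, or raise if the dimensions are inconsistent."""
    if state_dim % head_width != 0:
        raise ValueError(f"state_dim ({state_dim}) must be a multiple of head_width ({head_width}).")
    num_heads = state_dim // head_width
    assert state_dim == num_heads * head_width  # mirrors the config's explicit equality check
    return num_heads

print(check_attention_dims(1024, 32))  # 32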
| 653 |
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : List[str] = "THE GERMAN ATTACK"
snake_case_ : List[str] = "SECRET"
snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
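Deobfuscated, the driver above runs a Vigenère-style cipher that subtracts the repeated key on encryption and adds it back on decryption, with spaces passing through without consuming key letters. A self-contained sketch of the encryption path (names are assumptions; the printed ciphertext was checked by hand):

from string import ascii_uppercase

CHAR_TO_IDX = {c: i for i, c in enumerate(ascii_uppercase)}
IDX_TO_CHAR = dict(enumerate(ascii_uppercase))

def encrypt(message: str, key: str) -> str:
    # Extend the key to message length, exactly like generate_key above.
    full_key = (key * (len(message) // len(key) + 1))[: len(message)]
    out, k = [], 0
    for ch in message:
        if ch == " ":
            out.append(" ")  # spaces pass through and do not advance the key
        else:
            out.append(IDX_TO_CHAR[(CHAR_TO_IDX[ch] - CHAR_TO_IDX[full_key[k]]) % 26])
            k += 1
    return "".join(out)

print(encrypt("THE GERMAN ATTACK", "SECRET"))  # BDC PAYUWL JPAIYI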
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = None
a__ = None
__lowerCamelCase : Tuple = namedtuple('''CoinsDistribResult''', '''moves excess''')
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes(__magic_name__ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__magic_name__ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__magic_name__ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 ,1 )
snake_case_, snake_case_ : Dict = get_distrib(node.left )
snake_case_, snake_case_ : Tuple = get_distrib(node.right )
snake_case_ : str = 1 - left_distrib_excess
snake_case_ : List[str] = 1 - right_distrib_excess
snake_case_ : Dict = (
left_distrib_moves
+ right_distrib_moves
+ abs(__magic_name__ )
+ abs(__magic_name__ )
)
snake_case_ : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__magic_name__ ,__magic_name__ )
return get_distrib(__magic_name__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
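For reference, the recursion above is the classic "distribute coins in a binary tree" problem: each subtree reports its coin excess to its parent, and every unit of excess crossing an edge costs one move. A compact equivalent, with the dataclass deobfuscated to a plain data/left/right node (names are assumptions):

from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: "Node | None" = None
    right: "Node | None" = None

def distribute_coins(root: "Node | None") -> int:
    moves = 0
    def excess(n: "Node | None") -> int:
        nonlocal moves
        if n is None:
            return 0
        left, right = excess(n.left), excess(n.right)
        moves += abs(left) + abs(right)   # coins moved across the two child edges
        return n.data - 1 + left + right  # surplus (or deficit) passed to the parent
    excess(root)
    return moves

assert distribute_coins(Node(3, Node(0), Node(0))) == 2  # one coin pushed to each child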
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
snake_case_ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
snake_case_ : str = model.state_dict()
def to_tf_var_name(__magic_name__ ):
for patt, repl in iter(__magic_name__ ):
snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
return F'''bert/{name}'''
def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__magic_name__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
snake_case_ : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
snake_case_ : List[Any] = torch_tensor.T
snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = session.run(__magic_name__ )
print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
snake_case_ : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
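Assuming this script is saved as convert_bert_pytorch_checkpoint_to_original_tf.py (the filename is a guess; the flags come straight from the argparse definitions above), a typical invocation looks like:

python convert_bert_pytorch_checkpoint_to_original_tf.py \
    --model_name bert-base-uncased \
    --pytorch_model_path /path/to/pytorch_model.bin \
    --tf_cache_dir /path/to/tf_checkpoints

--cache_dir is optional and only controls where from_pretrained looks for the PyTorch model files.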
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :str=18 , lowerCAmelCase__ :Optional[Any]=30 , lowerCAmelCase__ :Optional[Any]=400 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Union[str, Any]=True , ) -> List[str]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"shortest_edge": 20}
snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Dict = min_resolution
snake_case_ : List[str] = max_resolution
snake_case_ : Any = do_resize
snake_case_ : Optional[Any] = size
snake_case_ : Union[str, Any] = do_center_crop
snake_case_ : int = crop_size
snake_case_ : List[str] = do_flip_channel_order
def _A ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = MobileViTImageProcessor if is_vision_available() else None
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : int = MobileViTImageProcessingTester(self )
@property
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) )
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
snake_case_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
pass
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : List[str] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self :List[Any] ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ : Dict = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
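Outside the test harness, the processor under test can be exercised directly; a minimal sketch (shapes follow the tester defaults above, and it assumes torch and PIL are installed):

import numpy as np
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (3, 32, 32), dtype=np.uint8)  # channels-first dummy image
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])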
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCAmelCase__ )
snake_case_ : Tuple = self.values[key]
def _A ( self :int ) -> Dict:
'''simple docstring'''
return (
sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
):
return key
return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
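The class above plugs deque-based buckets into a HashTable base class imported from a sibling module that is not shown here, so it is not runnable in isolation. A self-contained sketch of the same separate-chaining idea, with a per-bucket capacity playing the role of charge_factor (all names hypothetical):

from collections import deque

class ChainedTable:
    def __init__(self, size: int, charge_factor: int = 3):
        self.size = size
        self.charge_factor = charge_factor                 # max items per bucket
        self.buckets = [deque() for _ in range(size)]

    def insert(self, key, value) -> None:
        bucket = self.buckets[hash(key) % self.size]
        if len(bucket) >= self.charge_factor:
            raise OverflowError("bucket full; a real table would rehash or probe here")
        bucket.appendleft(value)                           # newest value first, as above

table = ChainedTable(8)
table.insert("a", 1)
table.insert("a", 2)
print(table.buckets[hash("a") % 8])  # deque([2, 1])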
| 653 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str:
'''simple docstring'''
if not batched:
snake_case_ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : int = image.size
else:
snake_case_, snake_case_ : Any = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : List[Any] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Tuple = self.size["shortest_edge"]
snake_case_ : Dict = self.size["shortest_edge"]
else:
snake_case_ : List[str] = []
for image in image_inputs:
snake_case_, snake_case_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = YolosImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : int = YolosImageProcessingTester(self )
@property
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
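The slow tests above depend on COCO fixture files, but the detection path only needs a COCO-style target per image. A minimal sketch of that input shape (all annotation values here are made up; the format mirrors the {"image_id": ..., "annotations": ...} dicts used above):

from PIL import Image
from transformers import YolosImageProcessor

image = Image.new("RGB", (640, 480))
target = {
    "image_id": 1,
    "annotations": [
        {"bbox": [10.0, 20.0, 100.0, 50.0], "category_id": 17, "area": 5_000.0, "iscrowd": 0},
    ],
}
processor = YolosImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape, encoding["labels"][0]["boxes"])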
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
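Because the map implements MutableMapping, the standard dict protocol works end to end, including the automatic resizing paths. A usage sketch, assuming the obfuscated class deobfuscates to HashMap (the name is read off its __repr__):

hm = HashMap(initial_block_size=8)   # HashMap is an assumed name for the class above
for i in range(20):                  # crossing the 0.75 capacity factor forces _size_up()
    hm[f"key{i}"] = i
assert len(hm) == 20 and hm["key7"] == 7
del hm["key7"]                       # tombstoned via the _deleted sentinel
assert "key7" not in hm and len(hm) == 19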
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : int = 0
for ch in input_str:
snake_case_ : Any = ord(__magic_name__ )
snake_case_ : Union[str, Any] = pow(2 ,__magic_name__ )
# If the bit for the current character's code point is already set, we have seen it before
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
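The function above answers "are all characters in the string distinct?" using a single integer as a bitset: bit n is set once a character with code point n has been seen. Deobfuscated and exercised:

def all_chars_unique(s: str) -> bool:
    bitmap = 0
    for ch in s:
        bit = 1 << ord(ch)        # same as pow(2, ord(ch)) above
        if bitmap & bit:          # this bit is already on: repeated character
            return False
        bitmap |= bit
    return True

assert all_chars_unique("abcde") is True
assert all_chars_unique("abcda") is False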
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_bigcode'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : str = activation_function
snake_case_ : Union[str, Any] = resid_pdrop
snake_case_ : Optional[Any] = embd_pdrop
snake_case_ : Any = attn_pdrop
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : int = scale_attn_weights
snake_case_ : Union[str, Any] = use_cache
snake_case_ : Dict = attention_softmax_in_fpaa
snake_case_ : Any = scale_attention_softmax_in_fpaa
snake_case_ : List[str] = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Any = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
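Instantiating the config shows the attribute_map remapping in action; a quick sketch (this follows the usual transformers config pattern, with the defaults listed above):

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=512, n_layer=6, n_head=8)
# attribute_map aliases the canonical names onto the GPT-style ones:
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 512 6 8
print(config.multi_query)  # True by default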
| 653 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
snake_case_ : str = [144, 192, 240]
snake_case_ : str = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
snake_case_ : int = [96, 120, 144]
snake_case_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
snake_case_ : Dict = [64, 80, 96]
snake_case_ : Union[str, Any] = [16, 16, 24, 48, 64, 80, 320]
snake_case_ : int = 0.05
snake_case_ : Any = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
snake_case_ : Tuple = 512
snake_case_ : str = 16
snake_case_ : List[str] = 21
snake_case_ : List[Any] = "pascal-voc-id2label.json"
else:
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = "imagenet-1k-id2label.json"
snake_case_ : Dict = "huggingface/label-files"
snake_case_ : str = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Optional[Any] = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> str:
"""simple docstring"""
for i in range(1 ,6 ):
if F'''layer_{i}.''' in name:
snake_case_ : Union[str, Any] = name.replace(F'''layer_{i}.''' ,F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
snake_case_ : Any = name.replace("conv_1." ,"conv_stem." )
if ".block." in name:
snake_case_ : int = name.replace(".block." ,"." )
if "exp_1x1" in name:
snake_case_ : int = name.replace("exp_1x1" ,"expand_1x1" )
if "red_1x1" in name:
snake_case_ : Tuple = name.replace("red_1x1" ,"reduce_1x1" )
if ".local_rep.conv_3x3." in name:
snake_case_ : Union[str, Any] = name.replace(".local_rep.conv_3x3." ,".conv_kxk." )
if ".local_rep.conv_1x1." in name:
snake_case_ : List[Any] = name.replace(".local_rep.conv_1x1." ,".conv_1x1." )
if ".norm." in name:
snake_case_ : Tuple = name.replace(".norm." ,".normalization." )
if ".conv." in name:
snake_case_ : Any = name.replace(".conv." ,".convolution." )
if ".conv_proj." in name:
snake_case_ : Union[str, Any] = name.replace(".conv_proj." ,".conv_projection." )
for i in range(0 ,2 ):
for j in range(0 ,4 ):
if F'''.{i}.{j}.''' in name:
snake_case_ : List[str] = name.replace(F'''.{i}.{j}.''' ,F'''.{i}.layer.{j}.''' )
for i in range(2 ,6 ):
for j in range(0 ,4 ):
if F'''.{i}.{j}.''' in name:
snake_case_ : Any = name.replace(F'''.{i}.{j}.''' ,F'''.{i}.''' )
if "expand_1x1" in name:
snake_case_ : List[Any] = name.replace("expand_1x1" ,"downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
snake_case_ : Any = name.replace("conv_3x3" ,"downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
snake_case_ : str = name.replace("reduce_1x1" ,"downsampling_layer.reduce_1x1" )
for i in range(2 ,5 ):
if F'''.global_rep.{i}.weight''' in name:
snake_case_ : List[str] = name.replace(F'''.global_rep.{i}.weight''' ,".layernorm.weight" )
if F'''.global_rep.{i}.bias''' in name:
snake_case_ : Optional[Any] = name.replace(F'''.global_rep.{i}.bias''' ,".layernorm.bias" )
if ".global_rep." in name:
snake_case_ : List[str] = name.replace(".global_rep." ,".transformer." )
if ".pre_norm_mha.0." in name:
snake_case_ : Optional[Any] = name.replace(".pre_norm_mha.0." ,".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
snake_case_ : Optional[int] = name.replace(".pre_norm_mha.1.out_proj." ,".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
snake_case_ : Any = name.replace(".pre_norm_ffn.0." ,".layernorm_after." )
if ".pre_norm_ffn.1." in name:
snake_case_ : Optional[int] = name.replace(".pre_norm_ffn.1." ,".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
snake_case_ : str = name.replace(".pre_norm_ffn.4." ,".output.dense." )
if ".transformer." in name:
snake_case_ : Optional[int] = name.replace(".transformer." ,".transformer.layer." )
if ".aspp_layer." in name:
snake_case_ : List[str] = name.replace(".aspp_layer." ,"." )
if ".aspp_pool." in name:
snake_case_ : List[Any] = name.replace(".aspp_pool." ,"." )
if "seg_head." in name:
snake_case_ : List[str] = name.replace("seg_head." ,"segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
snake_case_ : List[Any] = name.replace("segmentation_head.classifier.classifier." ,"segmentation_head.classifier." )
if "classifier.fc." in name:
snake_case_ : Optional[Any] = name.replace("classifier.fc." ,"classifier." )
elif (not base_model) and ("segmentation_head." not in name):
snake_case_ : Optional[int] = "mobilevit." + name
return name
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=False )-> str:
"""simple docstring"""
if base_model:
snake_case_ : List[str] = ""
else:
snake_case_ : Dict = "mobilevit."
for key in orig_state_dict.copy().keys():
snake_case_ : int = orig_state_dict.pop(__magic_name__ )
if key[:8] == "encoder.":
snake_case_ : List[Any] = key[8:]
if "qkv" in key:
snake_case_ : List[str] = key.split("." )
snake_case_ : Union[str, Any] = int(key_split[0][6:] ) - 1
snake_case_ : Any = int(key_split[3] )
snake_case_ : str = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
snake_case_ : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
snake_case_ : int = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
snake_case_ : int = val[:dim, :]
snake_case_ : Dict = val[dim : dim * 2, :]
snake_case_ : Union[str, Any] = val[-dim:, :]
else:
snake_case_ : int = val[:dim]
snake_case_ : Optional[int] = val[dim : dim * 2]
snake_case_ : Union[str, Any] = val[-dim:]
else:
snake_case_ : Dict = val
return orig_state_dict
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Tuple = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=False )-> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = get_mobilevit_config(__magic_name__ )
# load original state_dict
snake_case_ : List[str] = torch.load(__magic_name__ ,map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
snake_case_ : int = MobileViTForSemanticSegmentation(__magic_name__ ).eval()
else:
snake_case_ : List[str] = MobileViTForImageClassification(__magic_name__ ).eval()
snake_case_ : int = convert_state_dict(__magic_name__ ,__magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
snake_case_ : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
snake_case_ : List[str] = image_processor(images=prepare_img() ,return_tensors="pt" )
snake_case_ : Optional[Any] = model(**__magic_name__ )
snake_case_ : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
snake_case_ : Optional[int] = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
snake_case_ : Optional[Any] = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
snake_case_ : Union[str, Any] = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] ,__magic_name__ ,atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
snake_case_ : Tuple = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
snake_case_ : List[Any] = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
snake_case_ : List[str] = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] ,__magic_name__ ,atol=1E-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
snake_case_ : Any = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
snake_case_ : Optional[Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(__magic_name__ ,organization="apple" )
model.push_to_hub(__magic_name__ ,organization="apple" )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
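Assuming the converter is saved as convert_mobilevit.py (the filename is a guess; the flags come from the argparse block above), a typical run against an original MobileViT checkpoint would be:

python convert_mobilevit.py \
    --mobilevit_name mobilevit_s \
    --checkpoint_path /path/to/mobilevit_s.pt \
    --pytorch_dump_folder_path ./mobilevit-small \
    --push_to_hub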
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
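A note on the environment contract: the multi-GPU branch above reads WORLD_SIZE, N_GPU_NODE and RANK, then cross-checks the derived topology against N_NODES and NODE_RANK. For a two-node job with 8 GPUs per node, each process would need something like the following shell environment (illustrative values; RANK=11 on node 1 satisfies the node_id == global_rank // n_gpu_per_node assertion):

# shell environment expected by the multi-GPU branch
WORLD_SIZE=16    # total number of processes across all nodes
N_GPU_NODE=8     # processes (GPUs) per node
N_NODES=2        # must equal WORLD_SIZE // N_GPU_NODE
RANK=11          # this process's global rank, 0..15
NODE_RANK=1      # must equal RANK // N_GPU_NODE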
| 653 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A_ (a_ ):
"""simple docstring"""
a__ = '''Salesforce/blip-image-captioning-base'''
a__ = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
a__ = '''image_captioner'''
a__ = AutoModelForVisionaSeq
a__ = ['''image''']
a__ = ['''text''']
def __init__( self :Union[str, Any] , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :"Image" ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor(images=lowerCAmelCase__ , return_tensors="pt" )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
return self.model.generate(**lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )[0].strip()
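End to end, the tool is meant to be called like a function; a hedged usage sketch (ImageCaptioningTool is an assumed deobfuscated name for the class above, and PipelineTool's call path chains the encode, forward and decode methods defined here):

from PIL import Image

tool = ImageCaptioningTool()              # assumed name; requires the vision and torch extras
caption = tool(Image.open("photo.jpg"))   # returns the decoded English description
print(caption)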
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str:
'''simple docstring'''
if not batched:
snake_case_ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : int = image.size
else:
snake_case_, snake_case_ : Any = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : List[Any] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Tuple = self.size["shortest_edge"]
snake_case_ : Dict = self.size["shortest_edge"]
else:
snake_case_ : List[str] = []
for image in image_inputs:
snake_case_, snake_case_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = YolosImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : int = YolosImageProcessingTester(self )
@property
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test that calling the "pad" method and calling the image processor directly return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list:
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__magic_name__ ) )
if txt[a].isalpha()
]
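# Worked example (derived from the list comprehension above): for the input
# "ab" the function returns ["Ab", "aB"], one variant per alphabetic position,
# with exactly that position uppercased.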
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be an integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
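# Worked example (traced from the loop above): number=1, iterations=15 yields
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " - note the
# trailing space appended after every entry.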
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> list:
"""simple docstring"""
snake_case_ : List[str] = [0] * len(__magic_name__ )
for i in range(1 ,len(__magic_name__ ) ):
# use last results for better performance - dynamic programming
snake_case_ : List[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
snake_case_ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
snake_case_ : int = j
return prefix_result
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
return max(prefix_function(__magic_name__ ) )
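# Worked example (computed from the definition above):
# prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4], so the second
# function returns 4, the length of the longest prefix ("aabc") that is also
# a suffix.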
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : str = load_dataset("glue" ,"mrpc" )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Any = datasets.map(
__magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case_ : str = 8
else:
snake_case_ : Optional[Any] = None
return tokenizer.pad(
__magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)
# Instantiate dataloaders.
snake_case_ : str = DataLoader(
tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
snake_case_ : Optional[Any] = DataLoader(
tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
snake_case_ : List[str] = 2
# Initialize accelerator
snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : List[str] = config["lr"]
snake_case_ : Dict = int(config["num_epochs"] )
snake_case_ : Dict = int(config["seed"] )
snake_case_ : Optional[int] = int(config["batch_size"] )
snake_case_ : Dict = evaluate.load("glue" ,"mrpc" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as its only parameter
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__magic_name__ )
def inner_training_loop(__magic_name__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__magic_name__ )
# Instantiate the model (we build the model here so that the seed also controls the initialization of new weights)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ )
snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ )
# Instantiate scheduler
snake_case_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : int = model(**__magic_name__ )
snake_case_ : Any = outputs.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**__magic_name__ )
snake_case_ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__magic_name__ ,references=__magic_name__ ,)
snake_case_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
snake_case_ : str = parser.parse_args()
snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
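# Minimal sketch of the OOM-retry pattern demonstrated above. This is a sketch,
# not the library's internals; it assumes accelerate's
# find_executable_batch_size keeps halving `starting_batch_size` and re-invokes
# the wrapped function whenever a CUDA out-of-memory error is raised:
#
# from accelerate.utils import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build the dataloaders with `batch_size`, then run the training loop
#
# train()  # called with no arguments; the decorator supplies batch_size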
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''longformer'''
def __init__( self :str , lowerCAmelCase__ :Union[List[int], int] = 512 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 30_522 , lowerCAmelCase__ :int = 768 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :int = 3_072 , lowerCAmelCase__ :str = "gelu" , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :int = 512 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :float = 0.0_2 , lowerCAmelCase__ :float = 1E-1_2 , lowerCAmelCase__ :bool = False , **lowerCAmelCase__ :Union[str, Any] , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Optional[Any] = attention_window
snake_case_ : int = sep_token_id
snake_case_ : Union[str, Any] = bos_token_id
snake_case_ : Optional[Any] = eos_token_id
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : int = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : str = hidden_act
snake_case_ : Any = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Optional[int] = type_vocab_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Any = onnx_export
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :"PretrainedConfig" , lowerCAmelCase__ :str = "default" , lowerCAmelCase__ :"List[PatchingSpec]" = None ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : int = True
@property
def _A ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def _A ( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : Dict = super().outputs
if self.task == "default":
snake_case_ : int = {0: "batch"}
return outputs
@property
def _A ( self :Union[str, Any] ) -> float:
'''simple docstring'''
return 1E-4
@property
def _A ( self :List[Any] ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def _A ( self :int , lowerCAmelCase__ :"PreTrainedTokenizerBase" , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = super().generate_dummy_inputs(
preprocessor=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
snake_case_ : Union[str, Any] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
snake_case_ : Any = 1
return inputs
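# Usage sketch. The name LongformerOnnxConfig is an assumption (the subclass
# above is anonymized); everything else follows the dummy-input method defined
# above:
#
# from transformers import AutoConfig, AutoTokenizer
# config = AutoConfig.from_pretrained("allenai/longformer-base-4096")
# tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
# onnx_config = LongformerOnnxConfig(config)
# dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# # dummy contains input_ids, attention_mask and a global_attention_mask in
# # which every second token is marked global (see the method above)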
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
"""simple docstring"""
a__ = '''facebook/bart-large-mnli'''
a__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
a__ = '''text_classifier'''
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ['''text''', ['''text''']]
a__ = ['''text''']
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().setup()
snake_case_ : Optional[int] = self.model.config
snake_case_ : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
snake_case_ : Union[str, Any] = int(lowerCAmelCase__ )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Tuple = labels
return self.pre_processor(
[text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = outputs.logits
snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
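# Usage sketch (the example inputs are hypothetical; the class above is the
# anonymized text-classification tool). It assumes the PipelineTool call
# interface, where the tool instance is invoked directly:
#
# tool = A_()
# tool("This new restaurant is awful.", labels=["positive", "negative"])
# # -> 'negative'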
| 653 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Dict = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
snake_case_ : int = 1024
snake_case_ : Optional[int] = 4096
snake_case_ : Any = 24
snake_case_ : List[Any] = 16
snake_case_ : Tuple = [5, 11, 17, 23]
snake_case_ : int = [256, 512, 1024, 1024]
snake_case_ : Optional[Any] = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
snake_case_ : Dict = 768
snake_case_ : str = [1, 1, 1, 0.5]
snake_case_ : Any = [256, 512, 768, 768]
snake_case_ : Optional[Any] = 150
snake_case_ : List[Any] = 16
snake_case_ : Union[str, Any] = (1, 384, 384)
snake_case_ : int = False
snake_case_ : Any = "project"
if "ade" in checkpoint_url:
snake_case_ : Dict = True
snake_case_ : str = 768
snake_case_ : Union[str, Any] = [1, 1, 1, 0.5]
snake_case_ : Tuple = 150
snake_case_ : Any = 16
snake_case_ : Dict = "huggingface/label-files"
snake_case_ : str = "ade20k-id2label.json"
snake_case_ : Tuple = json.load(open(cached_download(hf_hub_url(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ) ,"r" ) )
snake_case_ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Tuple = idalabel
snake_case_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
snake_case_ : str = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Dict = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__magic_name__ ,__magic_name__ )
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case_ : Dict = name.replace("pretrained.model" ,"dpt.encoder" )
if "pretrained.model" in name:
snake_case_ : List[str] = name.replace("pretrained.model" ,"dpt.embeddings" )
if "patch_embed" in name:
snake_case_ : int = name.replace("patch_embed" ,"" )
if "pos_embed" in name:
snake_case_ : List[Any] = name.replace("pos_embed" ,"position_embeddings" )
if "attn.proj" in name:
snake_case_ : Any = name.replace("attn.proj" ,"attention.output.dense" )
if "proj" in name and "project" not in name:
snake_case_ : List[str] = name.replace("proj" ,"projection" )
if "blocks" in name:
snake_case_ : Union[str, Any] = name.replace("blocks" ,"layer" )
if "mlp.fc1" in name:
snake_case_ : Union[str, Any] = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
snake_case_ : Tuple = name.replace("mlp.fc2" ,"output.dense" )
if "norm1" in name and "backbone" not in name:
snake_case_ : Optional[int] = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name and "backbone" not in name:
snake_case_ : Optional[int] = name.replace("norm2" ,"layernorm_after" )
if "scratch.output_conv" in name:
snake_case_ : Union[str, Any] = name.replace("scratch.output_conv" ,"head" )
if "scratch" in name:
snake_case_ : Optional[Any] = name.replace("scratch" ,"neck" )
if "layer1_rn" in name:
snake_case_ : int = name.replace("layer1_rn" ,"convs.0" )
if "layer2_rn" in name:
snake_case_ : Optional[Any] = name.replace("layer2_rn" ,"convs.1" )
if "layer3_rn" in name:
snake_case_ : Union[str, Any] = name.replace("layer3_rn" ,"convs.2" )
if "layer4_rn" in name:
snake_case_ : Optional[int] = name.replace("layer4_rn" ,"convs.3" )
if "refinenet" in name:
snake_case_ : int = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case_ : Optional[Any] = name.replace(F'''refinenet{layer_idx}''' ,F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
snake_case_ : Tuple = name.replace("out_conv" ,"projection" )
if "resConfUnit1" in name:
snake_case_ : List[str] = name.replace("resConfUnit1" ,"residual_layer1" )
if "resConfUnit2" in name:
snake_case_ : Union[str, Any] = name.replace("resConfUnit2" ,"residual_layer2" )
if "conv1" in name:
snake_case_ : List[str] = name.replace("conv1" ,"convolution1" )
if "conv2" in name:
snake_case_ : Optional[int] = name.replace("conv2" ,"convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case_ : Dict = name.replace("pretrained.act_postprocess1.0.project.0" ,"neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case_ : List[Any] = name.replace("pretrained.act_postprocess2.0.project.0" ,"neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case_ : Tuple = name.replace("pretrained.act_postprocess3.0.project.0" ,"neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case_ : Any = name.replace("pretrained.act_postprocess4.0.project.0" ,"neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case_ : Optional[Any] = name.replace("pretrained.act_postprocess1.3" ,"neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
snake_case_ : Tuple = name.replace("pretrained.act_postprocess1.4" ,"neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
snake_case_ : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" ,"neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
snake_case_ : Optional[Any] = name.replace("pretrained.act_postprocess2.4" ,"neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
snake_case_ : str = name.replace("pretrained.act_postprocess3.3" ,"neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
snake_case_ : Optional[int] = name.replace("pretrained.act_postprocess4.3" ,"neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
snake_case_ : int = name.replace("pretrained.act_postprocess4.4" ,"neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
snake_case_ : str = name.replace("pretrained" ,"dpt" )
if "bn" in name:
snake_case_ : Tuple = name.replace("bn" ,"batch_norm" )
if "head" in name:
snake_case_ : Dict = name.replace("head" ,"head.head" )
if "encoder.norm" in name:
snake_case_ : Optional[Any] = name.replace("encoder.norm" ,"layernorm" )
if "auxlayer" in name:
snake_case_ : Dict = name.replace("auxlayer" ,"auxiliary_head.head" )
if "backbone" in name:
snake_case_ : Optional[int] = name.replace("backbone" ,"backbone.bit.encoder" )
if ".." in name:
snake_case_ : Tuple = name.replace(".." ,"." )
if "stem.conv" in name:
snake_case_ : str = name.replace("stem.conv" ,"bit.embedder.convolution" )
if "blocks" in name:
snake_case_ : Tuple = name.replace("blocks" ,"layers" )
if "convolution" in name and "backbone" in name:
snake_case_ : Optional[int] = name.replace("convolution" ,"conv" )
if "layer" in name and "backbone" in name:
snake_case_ : str = name.replace("layer" ,"layers" )
if "backbone.bit.encoder.bit" in name:
snake_case_ : Optional[int] = name.replace("backbone.bit.encoder.bit" ,"backbone.bit" )
if "embedder.conv" in name:
snake_case_ : Union[str, Any] = name.replace("embedder.conv" ,"embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
snake_case_ : Optional[int] = name.replace("backbone.bit.encoder.stem.norm" ,"backbone.bit.embedder.norm" )
return name
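# Worked example of the renaming rules above (traced through the branches, not
# taken from a checkpoint): "pretrained.model.blocks.0.attn.proj.weight"
# becomes "dpt.encoder.layer.0.attention.output.dense.weight"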
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : Optional[int] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
snake_case_ : Union[str, Any] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : int = in_proj_weight[: config.hidden_size, :]
snake_case_ : Optional[int] = in_proj_bias[: config.hidden_size]
snake_case_ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : Any = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ : Any = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_, snake_case_ : List[Any] = get_dpt_config(__magic_name__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
snake_case_ : List[str] = torch.load(__magic_name__ ,map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__magic_name__ )
# rename keys
for key in state_dict.copy().keys():
snake_case_ : Optional[int] = state_dict.pop(__magic_name__ )
snake_case_ : Dict = val
# read in qkv matrices
read_in_q_k_v(__magic_name__ ,__magic_name__ )
# load HuggingFace model
snake_case_ : Optional[int] = DPTForSemanticSegmentation(__magic_name__ ) if "ade" in checkpoint_url else DPTForDepthEstimation(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# Check outputs on an image
snake_case_ : int = 480 if "ade" in checkpoint_url else 384
snake_case_ : Optional[int] = DPTImageProcessor(size=__magic_name__ )
snake_case_ : str = prepare_img()
snake_case_ : List[str] = image_processor(__magic_name__ ,return_tensors="pt" )
# forward pass
snake_case_ : Any = model(**__magic_name__ ).logits if "ade" in checkpoint_url else model(**__magic_name__ ).predicted_depth
if show_prediction:
snake_case_ : int = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode="bicubic" ,align_corners=__magic_name__ ,)
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
__lowerCamelCase : Dict = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ViTFeatureExtractor''']
__lowerCamelCase : Any = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
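# Note on the pattern above: _LazyModule installs itself in sys.modules, so the
# heavy framework-specific submodules are only imported on first attribute
# access, e.g.
# from transformers.models.vit import ViTModel  # modeling_vit is imported
# # only at this point, and only if torch is available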
| 653 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (a_ ):
"""simple docstring"""
a__ = (UnCLIPScheduler,)
def _A ( self :Optional[Any] , **lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = {
"num_train_timesteps": 1_000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**lowerCAmelCase__ )
return config
def _A ( self :str ) -> Optional[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _A ( self :str ) -> str:
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCAmelCase__ )
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _A ( self :str ) -> Any:
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : str = self.get_scheduler_config(variance_type="fixed_small_log" )
snake_case_ : List[str] = scheduler_class(**lowerCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scheduler_classes[0]
snake_case_ : Tuple = self.get_scheduler_config(variance_type="learned_range" )
snake_case_ : Tuple = scheduler_class(**lowerCAmelCase__ )
snake_case_ : List[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase__ ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowerCAmelCase__ ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowerCAmelCase__ ) - -0.0_0_1_0_0_1_1 < 1E-5
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**lowerCAmelCase__ )
snake_case_ : Optional[int] = scheduler.timesteps
snake_case_ : int = self.dummy_model()
snake_case_ : List[Any] = self.dummy_sample_deter
snake_case_ : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
snake_case_ : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
snake_case_ : Dict = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
snake_case_ : Tuple = pred_prev_sample
snake_case_ : int = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : Any = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def _A ( self :Dict ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config()
snake_case_ : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(25 )
snake_case_ : Optional[int] = scheduler.timesteps
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Union[str, Any] = self.dummy_sample_deter
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
snake_case_ : List[str] = model(lowerCAmelCase__ , lowerCAmelCase__ )
if i + 1 == timesteps.shape[0]:
snake_case_ : int = None
else:
snake_case_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
snake_case_ : str = scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
snake_case_ : Any = pred_prev_sample
snake_case_ : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
snake_case_ : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
pass
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
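# e.g. with the defaults above: (4 // 2) ** 2 + 1 = 5 image tokens, so the
# full sequence length is 7 text tokens + 5 = 12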
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Optional[Any] = config_and_inputs
snake_case_ : Tuple = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(model_name )
self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([[1, 2]] )
snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
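# bbox follows the LayoutLM convention: one (x0, y0, x1, y1) box per input token, with unsqueeze(0 ) adding the batch dimension.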
# forward pass
snake_case_ : Any = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
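# Every class below is a placeholder that raises a clear error through requires_backends
# whenever the optional "flax" and "transformers" backends are missing; in the original
# sources the two classmethods correspond to from_config and from_pretrained.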
class A_ (metaclass=a_ ):
"""simple docstring"""
a__ = ['''flax''', '''transformers''']
def __init__( self :int , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _A ( cls :Optional[int] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _A ( cls :Tuple , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class A_ (metaclass=a_ ):
"""simple docstring"""
a__ = ['''flax''', '''transformers''']
def __init__( self :Optional[Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _A ( cls :str , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _A ( cls :List[Any] , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :int ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class A_ (metaclass=a_ ):
"""simple docstring"""
a__ = ['''flax''', '''transformers''']
def __init__( self :Union[str, Any] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :str ) -> int:
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _A ( cls :Tuple , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _A ( cls :Any , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class A_ (metaclass=a_ ):
"""simple docstring"""
a__ = ['''flax''', '''transformers''']
def __init__( self :int , *lowerCAmelCase__ :str , **lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _A ( cls :Any , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _A ( cls :Optional[int] , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(add_one ,lst ,num_proc=2 )
with pytest.raises(ValueError ):
with parallel_backend("unsupported backend" ):
map_nested(add_one ,lst ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
| 653 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ (a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = AltDiffusionPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _A ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
snake_case_ : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
snake_case_ : int = CLIPTextModel(lowerCAmelCase__ )
snake_case_ : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int]=0 ) -> Optional[int]:
'''simple docstring'''
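# Device-bound torch.Generator objects are not supported on mps, so fall back to seeding the global RNG there.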
if str(lowerCAmelCase__ ).startswith("mps" ):
snake_case_ : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
snake_case_ : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
snake_case_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _A ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Union[str, Any] = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : int = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
snake_case_ : List[Any] = text_encoder
snake_case_ : int = AltDiffusionPipeline(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : int = "A photo of an astronaut"
snake_case_ : List[Any] = alt_pipe(**lowerCAmelCase__ )
snake_case_ : Dict = output.images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Optional[int] = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.get_dummy_components()
snake_case_ : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
snake_case_ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : Dict = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
snake_case_ : str = text_encoder
snake_case_ : Any = AltDiffusionPipeline(**lowerCAmelCase__ )
snake_case_ : str = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : Tuple = alt_pipe(**lowerCAmelCase__ )
snake_case_ : Union[str, Any] = output.images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : List[Any] = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=lowerCAmelCase__ )
snake_case_ : str = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Tuple = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
snake_case_ : Union[str, Any] = output.images
snake_case_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Dict = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
snake_case_ : List[str] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = "A painting of a squirrel eating a burger"
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Tuple = alt_pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="numpy" )
snake_case_ : Optional[int] = output.images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Optional[Any] = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''esm'''
def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : str = emb_layer_norm_before
snake_case_ : List[Any] = token_dropout
snake_case_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
snake_case_ : Optional[Any] = EsmFoldConfig()
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
snake_case_ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
snake_case_ : List[str] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : List[Any] = None
snake_case_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase__ ):
snake_case_ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = None
a__ = True
a__ = False
a__ = False
a__ = False
a__ = 0
a__ = True
a__ = False
a__ = 128
a__ = None
def _A ( self :Dict ) -> int:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase__ ):
snake_case_ : int = TrunkConfig(**self.trunk )
def _A ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = asdict(self )
snake_case_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 48
a__ = 1024
a__ = 128
a__ = 32
a__ = 32
a__ = 32
a__ = 0
a__ = 0
a__ = False
a__ = 4
a__ = 128
a__ = None
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase__ ):
snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
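# Sanity check on the arithmetic, assuming the upstream defaults: 1024 = 32 heads * 32 width for the sequence track and 128 = 4 heads * 32 width for the pairwise track.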
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : Dict = self.structure_module.to_dict()
return output
@dataclass
class A_ :
"""simple docstring"""
a__ = 384
a__ = 128
a__ = 16
a__ = 128
a__ = 12
a__ = 4
a__ = 8
a__ = 0.1
a__ = 8
a__ = 1
a__ = 2
a__ = 7
a__ = 10
a__ = 1E-8
a__ = 1E5
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
return asdict(self )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 653 | 1 |
'''simple docstring'''
from collections import namedtuple
__lowerCamelCase : Union[str, Any] = namedtuple('''from_to''', '''from_ to''')
__lowerCamelCase : Optional[int] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00_454, 264.172),
'''cubicyard''': from_to(0.76_455, 1.30_795),
'''cubicfoot''': from_to(0.028, 35.3_147),
'''cup''': from_to(0.000_236_588, 4_226.75),
}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ", ".join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ", ".join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
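# Worked example from the table above (pure arithmetic, no extra API assumed):
# 4 cubic metres -> gallons: 4 * 1 (cubicmeter.from_) * 264.172 (gallon.to) = 1056.688.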
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
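# The import structure is declared twice on purpose: once as strings so _LazyModule can
# defer the heavy imports, and once as real imports under TYPE_CHECKING for static type checkers.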
__lowerCamelCase : Any = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ = 50 )-> int:
"""simple docstring"""
snake_case_ : Tuple = [1] * (length + 1)
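# ways_number[n] counts the fillings of a row of length n: a block of block_length placed
# at block_start leaves an independent sub-row of length n - block_start - block_length - 1
# (one empty cell must separate blocks); the trailing += 1 counts the block that ends flush
# with the row.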
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = d_model
snake_case_ : Dict = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[Any] = prediction_length
snake_case_ : str = context_length
snake_case_ : Tuple = cardinality
snake_case_ : List[str] = num_time_features
snake_case_ : Optional[Any] = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : Optional[Any] = is_training
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = context_length
snake_case_ : Any = prediction_length + label_length
snake_case_ : Union[str, Any] = label_length
snake_case_ : List[Any] = moving_average
snake_case_ : str = autocorrelation_factor
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = config.context_length + max(config.lags_sequence )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : int = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _A ( self :Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_config()
snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
return config, inputs_dict
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
snake_case_ : Any = outputs.encoder_last_hidden_state
snake_case_ : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[Any] = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case_ : List[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
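# Rebuild the decoder inputs by hand: pad the seasonal component with zeros over the
# prediction window, extend the trend component with the context mean, then append the
# time features, mirroring what the model's forward pass assembles internally.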
snake_case_ : Any = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case_ : List[str] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case_ : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case_ : Any = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_ : Tuple = decoder(
trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__ = (AutoformerForPrediction,) if is_torch_available() else ()
a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = AutoformerModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def _A ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def _A ( self :str ) -> str:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = inspect.signature(getattr(AutoformerModel , "forward" ) )
# The main input is the name of the argument after `self`
snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(lowerCAmelCase__ )
snake_case_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Dict = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
snake_case_ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : Any = True
snake_case_ : Any = False
snake_case_ : Dict = True
snake_case_ : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Optional[int] = True
snake_case_ : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : str = outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case_ : Tuple = len(lowerCAmelCase__ )
snake_case_ : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# decoder attentions
snake_case_ : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = True
snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _A ( self :Any ) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
"""simple docstring"""
snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
snake_case_ : List[str] = torch.load(file ,map_location=torch_device )
return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : List[str] = prepare_batch()
with torch.no_grad():
snake_case_ : int = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
snake_case_ : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Optional[Any] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Tuple = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
snake_case_ : str = prepare_batch("val-batch.pt" )
with torch.no_grad():
snake_case_ : Optional[Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCamelCase : List[str] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__lowerCamelCase : int = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__lowerCamelCase : Optional[Any] = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = 0.0
for i, j in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase__ , lowerCAmelCase__ ) else 0.0
snake_case_ : Tuple = n_correct / len(lowerCAmelCase__ )
return {
"accuracy": accuracy,
}
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : int = {"unk_token": "<unk>"}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _A ( self :Any ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _A ( self :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ : Any = "A, <mask> AllenNLP sentence."
snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _A ( self :int ) -> Tuple:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
def _A ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Tuple = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
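if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Hedged sketch of what the assertions above verify, outside the test
    # harness: with a fast tokenizer, `return_offsets_mapping=True` yields
    # (start, end) character spans per token, and `add_prefix_space` /
    # `trim_offsets` control whether the leading space is counted in a
    # span. The checkpoint name is an assumption, not taken from the test.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("roberta-base", use_fast=True)
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    print(enc.offset_mapping)  # expected: [(0, 5), (6, 11)] with default trim_offsets=True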
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
snake_case_ : Any = ""
while len(__magic_name__ ) % 3 != 0:
snake_case_ : Tuple = "0" + bin_string
    snake_case_ : List[str] = [
        bin_string[index : index + 3] for index in range(0 , len(__magic_name__ ) , 3 )
    ]
for bin_group in bin_string_in_3_list:
snake_case_ : Union[str, Any] = 0
for index, val in enumerate(__magic_name__ ):
oct_val += int(2 ** (2 - index) * int(__magic_name__ ) )
oct_string += str(__magic_name__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
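# ---------------------------------------------------------------------------
# De-obfuscated sketch of the converter above (this dump anonymizes
# identifiers, so `bin_to_octal` is a hypothetical name). `int(group, 2)`
# replaces the manual 2 ** (2 - index) accumulation with the same result.
def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string  # left-pad to whole 3-bit groups
    return "".join(
        str(int(bin_string[i : i + 3], 2)) for i in range(0, len(bin_string), 3)
    )


assert bin_to_octal("111") == "7"
assert bin_to_octal("10101") == "25"  # 0b10101 == 21 == 0o25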
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
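# ---------------------------------------------------------------------------
# Hedged sketch of the perfect-partition check above with identifiers
# restored (names are hypothetical; the dump anonymizes them, and its
# `math.loga` reads as `math.log2`). Values of the form 2**k * (2**k - 1)
# give sqrt(4 * n + 1) / 2 + 1 / 2 == 2**k, so the exponent is integral.
def check_partition_perfect_sketch(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


assert check_partition_perfect_sketch(2)  # k = 1: 2 * 1
assert check_partition_perfect_sketch(12)  # k = 2: 4 * 3
assert not check_partition_perfect_sketch(10)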
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
return len(set(__magic_name__ ) ) == len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
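# ---------------------------------------------------------------------------
# Quick sanity check of the set-based uniqueness test above (the function
# name is anonymized in this dump; `all_unique` is a stand-in).
def all_unique(items) -> bool:
    return len(set(items)) == len(items)


assert all_unique([1, 2, 3])
assert not all_unique("hello")  # 'l' repeats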
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'''Transferred from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architectures,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
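# ---------------------------------------------------------------------------
# Hedged sketch of the label-mapping step used above: the id2label table is
# fetched from the `huggingface/label-files` dataset repo and inverted for
# label2id. Requires network access, so it is wrapped in a helper here.
def load_imagenet_label_maps():
    import json

    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        "huggingface/label-files", "imagenet-1k-id2label.json", repo_type="dataset"
    )
    with open(path, "r") as f:
        id2label = {int(k): v for k, v in json.load(f).items()}
    label2id = {v: k for k, v in id2label.items()}
    return id2label, label2id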
| 653 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(__magic_name__ ,__magic_name__ ,__magic_name__=0 ,__magic_name__=None ):
snake_case_ : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
snake_case_ : Optional[int] = math.floor(val / multiple ) * multiple
if x < min_val:
snake_case_ : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
snake_case_ : Union[str, Any] = (output_size, output_size) if isinstance(__magic_name__ ,__magic_name__ ) else output_size
snake_case_, snake_case_ : Optional[Any] = get_image_size(__magic_name__ )
snake_case_, snake_case_ : Optional[Any] = output_size
# determine new height and width
snake_case_ : str = output_height / input_height
snake_case_ : List[str] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
snake_case_ : int = scale_width
else:
# fit height
snake_case_ : List[str] = scale_height
snake_case_ : Optional[int] = constraint_to_multiple_of(scale_height * input_height ,multiple=__magic_name__ )
snake_case_ : List[Any] = constraint_to_multiple_of(scale_width * input_width ,multiple=__magic_name__ )
return (new_height, new_width)
class A_ (a_ ):
"""simple docstring"""
a__ = ['''pixel_values''']
def __init__( self :Any , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[int, float] = 1 / 255 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ :List[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : Tuple = size if size is not None else {"height": 384, "width": 384}
snake_case_ : Optional[Any] = get_size_dict(lowerCAmelCase__ )
snake_case_ : Optional[int] = do_resize
snake_case_ : List[Any] = size
snake_case_ : Dict = keep_aspect_ratio
snake_case_ : Dict = ensure_multiple_of
snake_case_ : str = resample
snake_case_ : List[str] = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : Tuple = do_normalize
snake_case_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self :Tuple , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Tuple , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Any = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
snake_case_ : Dict = get_resize_output_image_size(
lowerCAmelCase__ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=lowerCAmelCase__ , multiple=lowerCAmelCase__ , )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Tuple , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[int, float] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Dict , ) -> int:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Tuple , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :ImageInput , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :int = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :int = None , lowerCAmelCase__ :PILImageResampling = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :float = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ :List[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ : int = do_resize if do_resize is not None else self.do_resize
snake_case_ : Tuple = size if size is not None else self.size
snake_case_ : Union[str, Any] = get_size_dict(lowerCAmelCase__ )
snake_case_ : List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
snake_case_ : int = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
snake_case_ : Any = resample if resample is not None else self.resample
snake_case_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : str = image_mean if image_mean is not None else self.image_mean
snake_case_ : List[str] = image_std if image_std is not None else self.image_std
snake_case_ : str = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case_ : Optional[int] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
snake_case_ : List[str] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_rescale:
snake_case_ : Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
snake_case_ : List[Any] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
snake_case_ : Tuple = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
snake_case_ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Tuple] = None ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = target_sizes.numpy()
snake_case_ : Tuple = []
for idx in range(len(lowerCAmelCase__ ) ):
snake_case_ : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCAmelCase__ )
snake_case_ : List[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
snake_case_ : int = logits.argmax(dim=1 )
snake_case_ : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
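# ---------------------------------------------------------------------------
# Standalone sketch of the resize rule implemented above: with
# keep_aspect_ratio, scale as little as possible (the scale closest to 1),
# then snap each side to a multiple of `multiple`. Numbers are illustrative.
def _snap(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


def _output_size(input_hw, target=384, multiple=32):
    input_height, input_width = input_hw
    scale_height, scale_width = target / input_height, target / input_width
    scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height
    return _snap(scale * input_height, multiple), _snap(scale * input_width, multiple)


assert _output_size((480, 640)) == (384, 512)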
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''roc_bert'''
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[Any] = use_cache
snake_case_ : Optional[Any] = enable_pronunciation
snake_case_ : List[Any] = enable_shape
snake_case_ : Optional[int] = pronunciation_embed_dim
snake_case_ : Dict = pronunciation_vocab_size
snake_case_ : int = shape_embed_dim
snake_case_ : Any = shape_vocab_size
snake_case_ : Optional[int] = concat_input
snake_case_ : List[Any] = position_embedding_type
snake_case_ : Any = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
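if __name__ == "__main__":
    # Usage sketch: the anonymized class above matches transformers'
    # RoCBertConfig (an assumption based on model_type == "roc_bert").
    from transformers import RoCBertConfig

    config = RoCBertConfig(hidden_size=768, num_hidden_layers=12)
    print(config.model_type, config.pronunciation_vocab_size)  # roc_bert 910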
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''nllb-moe'''
a__ = ['''past_key_values''']
a__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , lowerCAmelCase__ :Optional[Any]=128_112 , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=4_096 , lowerCAmelCase__ :int=16 , lowerCAmelCase__ :Optional[int]=0.0_5 , lowerCAmelCase__ :str=0.0_5 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :str="float32" , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :List[str]=128 , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :str=0.0_0_1 , lowerCAmelCase__ :List[Any]=0.0_0_1 , lowerCAmelCase__ :Any="all" , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Optional[Any]=1.0 , lowerCAmelCase__ :Any=0.2 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :Optional[int]=False , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : str = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : Optional[int] = d_model
snake_case_ : Optional[Any] = encoder_ffn_dim
snake_case_ : Optional[int] = encoder_layers
snake_case_ : Union[str, Any] = encoder_attention_heads
snake_case_ : Tuple = decoder_ffn_dim
snake_case_ : List[str] = decoder_layers
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : str = dropout
snake_case_ : str = attention_dropout
snake_case_ : Union[str, Any] = activation_dropout
snake_case_ : Tuple = activation_function
snake_case_ : Any = init_std
snake_case_ : List[str] = encoder_layerdrop
snake_case_ : Optional[int] = decoder_layerdrop
snake_case_ : Tuple = use_cache
snake_case_ : Optional[Any] = encoder_layers
snake_case_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ : Optional[Any] = router_z_loss_coef
snake_case_ : List[Any] = router_aux_loss_coef
snake_case_ : Any = decoder_sparse_step
snake_case_ : Any = encoder_sparse_step
snake_case_ : List[str] = num_experts
snake_case_ : Dict = expert_capacity
snake_case_ : Any = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
snake_case_ : int = router_dtype
snake_case_ : Any = router_ignore_padding_tokens
snake_case_ : List[Any] = batch_prioritized_routing
snake_case_ : Tuple = second_expert_policy
snake_case_ : Tuple = normalize_router_prob_before_dropping
snake_case_ : Any = moe_eval_capacity_token_fraction
snake_case_ : Optional[Any] = moe_token_dropout
snake_case_ : List[Any] = output_router_logits
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
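if __name__ == "__main__":
    # Usage sketch: the anonymized class above matches transformers'
    # NllbMoeConfig (an assumption based on model_type == "nllb-moe").
    from transformers import NllbMoeConfig

    config = NllbMoeConfig(num_experts=8, expert_capacity=32)
    print(config.model_type, config.num_experts)  # nllb-moe 8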
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
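# ---------------------------------------------------------------------------
# Compact sketch of the bottom-up recurrence above. Note that despite the
# `area` naming, the helpers track the largest *side length*: the 2x2
# all-ones matrix yields 2, not 4.
def largest_square_side(rows, cols, mat):
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                dp[row][col] = 1 + min(
                    dp[row][col + 1], dp[row + 1][col + 1], dp[row + 1][col]
                )
                best = max(best, dp[row][col])
    return best


assert largest_square_side(2, 2, [[1, 1], [1, 1]]) == 2
assert largest_square_side(3, 3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]) == 2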
| 653 | 1 |
'''simple docstring'''
from torch import nn
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
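# ---------------------------------------------------------------------------
# Usage sketch with the anonymized helper restored (`get_activation` is an
# assumed name): map a config string to the matching torch activation.
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f"Unsupported activation function: {act_fn}")


assert isinstance(get_activation("swish"), nn.SiLU)
assert isinstance(get_activation("gelu"), nn.GELU)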
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=7 )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
snake_case_ : Dict = "636036"
snake_case_ : List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
snake_case_ : Optional[Any] = requests.get(__magic_name__ ,headers=__magic_name__ ).json()
return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = get_daily_ci_runs(__magic_name__ )
snake_case_ : Optional[int] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
snake_case_ : Dict = workflow_run["id"]
break
return workflow_run_id
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
snake_case_ : Union[str, Any] = get_artifacts_links(worflow_run_id=__magic_name__ ,token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
snake_case_ : Union[str, Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ ,artifact_url=__magic_name__ ,output_dir=__magic_name__ ,token=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = {}
for artifact_name in artifact_names:
snake_case_ : Any = os.path.join(__magic_name__ ,F'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
snake_case_ : Tuple = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
snake_case_ : Optional[Any] = f.read().decode("UTF-8" )
return results
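# ---------------------------------------------------------------------------
# Standalone sketch of the zip-reading step above. Note that
# `os.path.isdir(filename)` inspects the local filesystem, not the archive;
# directory members inside a zip are more reliably detected by a trailing
# slash, as done here.
def read_zip_texts(zip_path):
    texts = {}
    with zipfile.ZipFile(zip_path) as z:
        for filename in z.namelist():
            if not filename.endswith("/"):  # skip directory entries
                with z.open(filename) as f:
                    texts[filename] = f.read().decode("UTF-8")
    return texts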
| 653 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCamelCase : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ = 1_6000 )-> Optional[int]:
"""simple docstring"""
snake_case_ : List[str] = int(round(sample_rate * max_length ) )
if len(__magic_name__ ) <= sample_length:
return wav
snake_case_ : str = randint(0 ,len(__magic_name__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class A_ :
"""simple docstring"""
a__ = field(default=a_ , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
a__ = field(
default=a_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
a__ = field(
default=a_ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
a__ = field(
default=a_ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
a__ = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
a__ = field(
default='''validation''' , metadata={
'''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
a__ = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
a__ = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
a__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
a__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
a__ = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class A_ :
"""simple docstring"""
a__ = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
a__ = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a__ = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
a__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
a__ = field(
default=a_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
a__ = field(
default=a_ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
a__ = field(
default=a_ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
a__ = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
a__ = field(
default=a_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
a__ = field(
default=a_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _A ( self :Optional[int] ) -> str:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , lowerCAmelCase__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def __UpperCAmelCase ( )-> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_, snake_case_, snake_case_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_, snake_case_, snake_case_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" ,__magic_name__ ,__magic_name__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
snake_case_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
snake_case_ : int = DatasetDict()
snake_case_ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
snake_case_ : Optional[Any] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
snake_case_ : List[str] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
snake_case_ : List[str] = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
snake_case_ : Optional[Any] = feature_extractor.model_input_names[0]
def train_transforms(__magic_name__ ):
snake_case_ : List[str] = []
for audio in batch[data_args.audio_column_name]:
snake_case_ : List[Any] = random_subsample(
audio["array"] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__magic_name__ )
snake_case_ : Any = feature_extractor(__magic_name__ ,sampling_rate=feature_extractor.sampling_rate )
snake_case_ : Optional[Any] = {model_input_name: inputs.get(__magic_name__ )}
snake_case_ : Dict = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__magic_name__ ):
snake_case_ : Tuple = [audio["array"] for audio in batch[data_args.audio_column_name]]
snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ ,sampling_rate=feature_extractor.sampling_rate )
snake_case_ : Dict = {model_input_name: inputs.get(__magic_name__ )}
snake_case_ : Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case_ : str = raw_datasets["train"].features[data_args.label_column_name].names
snake_case_, snake_case_ : Tuple = {}, {}
for i, label in enumerate(__magic_name__ ):
snake_case_ : Dict = str(__magic_name__ )
snake_case_ : Union[str, Any] = label
# Load the accuracy metric from the datasets package
snake_case_ : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__magic_name__ ):
snake_case_ : Tuple = np.argmax(eval_pred.predictions ,axis=1 )
return metric.compute(predictions=__magic_name__ ,references=eval_pred.label_ids )
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(__magic_name__ ) ,labelaid=__magic_name__ ,idalabel=__magic_name__ ,finetuning_task="audio-classification" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
snake_case_ : Optional[Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=__magic_name__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ : int = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__magic_name__ ,output_all_columns=__magic_name__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ : Tuple = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__magic_name__ ,output_all_columns=__magic_name__ )
# Initialize our trainer
snake_case_ : List[str] = Trainer(
model=__magic_name__ ,args=__magic_name__ ,train_dataset=raw_datasets["train"] if training_args.do_train else None ,eval_dataset=raw_datasets["eval"] if training_args.do_eval else None ,compute_metrics=__magic_name__ ,tokenizer=__magic_name__ ,)
# Training
if training_args.do_train:
snake_case_ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
snake_case_ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ : List[Any] = last_checkpoint
snake_case_ : str = trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model()
trainer.log_metrics("train" ,train_result.metrics )
trainer.save_metrics("train" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case_ : List[Any] = trainer.evaluate()
trainer.log_metrics("eval" ,__magic_name__ )
trainer.save_metrics("eval" ,__magic_name__ )
# Write model card and (optionally) push to hub
snake_case_ : Dict = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
if __name__ == "__main__":
main()
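# ---------------------------------------------------------------------------
# Invocation sketch (argument names are read off the dataclasses and
# TrainingArguments above; the script filename is hypothetical):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ks \
#       --do_train --do_eval \
#       --max_length_seconds 1
#
# The random-crop helper near the top of the script behaves like this
# restatement (names restored by assumption):
def _random_subsample(wav, max_length, sample_rate=16000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]


assert len(_random_subsample(np.zeros(32000), 1.0)) == 16000  # 2 s -> 1 s crop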
| 653 |
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
snake_case_ : List[str] = "THE GERMAN ATTACK"
snake_case_ : List[str] = "SECRET"
snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
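# ---------------------------------------------------------------------------
# Self-contained round-trip sketch of the running-key cipher above: a
# Vigenere variant that subtracts the key letter on encryption and adds it
# back on decryption; spaces pass through without consuming key letters.
C2I = {char: i for i, char in enumerate(ascii_uppercase)}
I2C = dict(enumerate(ascii_uppercase))


def _crypt(text: str, key: str, sign: int) -> str:
    out, i = "", 0
    for ch in text:
        if ch == " ":
            out += " "
        else:
            out += I2C[(C2I[ch] + sign * C2I[key[i]]) % 26]
            i += 1
    return out


_msg = "THE GERMAN ATTACK"
_key = ("SECRET" * len(_msg))[: len(_msg)]  # same repetition generate_key builds
_cipher = _crypt(_msg, _key, -1)  # encryption subtracts the key
assert _cipher == "BDC PAYUWL JPAIYI"
assert _crypt(_cipher, _key, +1) == _msg  # decryption restores the message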
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : int = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A_ (a_ , a_ ):
"""simple docstring"""
a__ = '''nat'''
a__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :str=[3, 4, 6, 5] , lowerCAmelCase__ :List[str]=[2, 4, 8, 16] , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :List[Any]=3.0 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Union[str, Any]=0.0 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :List[Any]=1E-5 , lowerCAmelCase__ :Tuple=0.0 , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=None , **lowerCAmelCase__ :Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : List[str] = patch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[int] = embed_dim
snake_case_ : List[Any] = depths
snake_case_ : str = len(lowerCAmelCase__ )
snake_case_ : int = num_heads
snake_case_ : Union[str, Any] = kernel_size
snake_case_ : int = mlp_ratio
snake_case_ : Any = qkv_bias
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Optional[int] = drop_path_rate
snake_case_ : str = hidden_act
snake_case_ : Dict = layer_norm_eps
snake_case_ : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(lowerCAmelCase__ ) - 1) )
snake_case_ : Any = layer_scale_init_value
snake_case_ : Optional[Any] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
snake_case_, snake_case_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
snake_case_ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
snake_case_ : str = model.state_dict()
def to_tf_var_name(__magic_name__ ):
for patt, repl in iter(__magic_name__ ):
snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
return F'''bert/{name}'''
def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__magic_name__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
snake_case_ : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
snake_case_ : List[Any] = torch_tensor.T
snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = session.run(__magic_name__ )
print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
snake_case_ : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
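# ---------------------------------------------------------------------------
# Standalone sketch of the PyTorch -> TF1 variable-name mapping applied
# above (patterns copied from the conversion function; the example variable
# name is illustrative).
_VAR_MAP = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)


def _to_tf_var_name(name: str) -> str:
    for patt, repl in _VAR_MAP:
        name = name.replace(patt, repl)
    return f"bert/{name}"


assert (
    _to_tf_var_name("encoder.layer.0.attention.self.query.weight")
    == "bert/encoder/layer_0/attention/self/query/kernel"
)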
| 653 | 1 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : str = load_dataset("glue" ,"mrpc" )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Any = datasets.map(
__magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case_ : str = 8
else:
snake_case_ : Optional[Any] = None
return tokenizer.pad(
__magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)
# Instantiate dataloaders.
snake_case_ : str = DataLoader(
tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
snake_case_ : Optional[Any] = DataLoader(
tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
snake_case_ : List[str] = 2
# Initialize accelerator
snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : List[str] = config["lr"]
snake_case_ : Dict = int(config["num_epochs"] )
snake_case_ : Dict = int(config["seed"] )
snake_case_ : Optional[int] = int(config["batch_size"] )
snake_case_ : Dict = evaluate.load("glue" ,"mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__magic_name__ )
def inner_training_loop(__magic_name__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__magic_name__ )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ )
snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ )
# Instantiate scheduler
snake_case_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : int = model(**__magic_name__ )
snake_case_ : Any = outputs.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**__magic_name__ )
snake_case_ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__magic_name__ ,references=__magic_name__ ,)
snake_case_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose "
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
snake_case_ : str = parser.parse_args()
snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
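# A minimal, self-contained sketch of the dynamic-padding idea in the
# collate_fn above: sequences are padded to the longest item in the batch,
# rounded up to a multiple of 8/16 for mixed precision. The helper below is
# an illustration with a hand-rolled pad, not tokenizer.pad itself.
def pad_batch(batch, multiple, pad_id=0):
    longest = max(len(ids) for ids in batch)
    if multiple is not None:
        longest = -(-longest // multiple) * multiple  # ceil to next multiple
    return [ids + [pad_id] * (longest - len(ids)) for ids in batch]

print(pad_batch([[101, 7592, 102], [101, 102]], multiple=8))
# -> two rows of length 8, ready to stack into a tensor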
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
"""simple docstring"""
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCAmelCase__ )
snake_case_ : Tuple = self.values[key]
def _A ( self :int ) -> Dict:
'''simple docstring'''
return (
sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
):
return key
return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
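# A minimal, self-contained sketch of the separate-chaining idea above
# (values for a slot kept in a deque, new items pushed on the left); it is
# independent of the HashTable base class, which is not shown in this dump.
from collections import deque

buckets = [None] * 4

def chain_insert(key, value):
    slot = hash(key) % len(buckets)
    if buckets[slot] is None:
        buckets[slot] = deque()
    buckets[slot].appendleft(value)

chain_insert("a", 1)
chain_insert("a", 2)
print(buckets[hash("a") % len(buckets)])  # deque([2, 1])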
| 653 | 1 |
'''simple docstring'''
from math import factorial
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = real
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : str = [1] * rank
else:
snake_case_ : Any = rank
def __repr__( self :int ) -> Optional[Any]:
'''simple docstring'''
return (
F'''{self.real}+'''
F'''{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def _A ( self :Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCAmelCase__ )
def __add__( self :List[str] , lowerCAmelCase__ :str ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return Dual(self.real + other , self.duals )
snake_case_ : Optional[Any] = self.duals.copy()
snake_case_ : Any = other.duals.copy()
if len(lowerCAmelCase__ ) > len(lowerCAmelCase__ ):
o_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
elif len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
s_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
snake_case_ : Optional[int] = []
for i in range(len(lowerCAmelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCAmelCase__ )
a__ = __add__
def __sub__( self :Dict , lowerCAmelCase__ :Tuple ) -> List[str]:
'''simple docstring'''
return self + other * -1
def __mul__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Any = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCAmelCase__ )
snake_case_ : Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCAmelCase__ )
a__ = __mul__
def __truediv__( self :int , lowerCAmelCase__ :Any ) -> Tuple:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCAmelCase__ )
raise ValueError
def __floordiv__( self :List[Any] , lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case_ : int = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCAmelCase__ )
raise ValueError
def __pow__( self :Optional[Any] , lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
if n < 0 or isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
snake_case_ : Tuple = self
for _ in range(n - 1 ):
x *= self
return x
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not callable(__magic_name__ ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__magic_name__ ,(float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("differentiate() requires an int as input for order" )
snake_case_ : Optional[Any] = Dual(__magic_name__ ,1 )
snake_case_ : Union[str, Any] = func(__magic_name__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
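# A quick analytic cross-check of the example above: f(y) = y**2 * y**4
# = y**6, so the second derivative is 30 * y**4, and at y = 9 the
# dual-number routine should print 30 * 9**4.
print(30 * 9**4)  # 196830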
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = ", ".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
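# A minimal, self-contained sketch of the linear-probing idea used above:
# on a collision, step to (ind + 1) % capacity until a free slot (or the
# same key) is found. This is an illustration, not the class itself.
def probe_insert(buckets, key, val):
    ind = hash(key) % len(buckets)
    for _ in range(len(buckets)):
        if buckets[ind] is None or buckets[ind][0] == key:
            buckets[ind] = (key, val)
            return True
        ind = (ind + 1) % len(buckets)  # wrap around, as in _get_next_ind
    return False  # table full

table = [None] * 8
probe_insert(table, "alpha", 1)
probe_insert(table, "beta", 2)
print(sum(slot is not None for slot in table))  # 2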
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : List[str] = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
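# A minimal, self-contained sketch of the lazy-import idea behind
# transformers' _LazyModule (this uses a PEP 562-style module __getattr__
# as an illustration, not the library's actual implementation):
import importlib
import types

def make_lazy(name, import_structure):
    mod = types.ModuleType(name)
    def lazy_getattr(attr):
        for submodule, exported in import_structure.items():
            if attr in exported:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)
    mod.__getattr__ = lazy_getattr  # resolved only on first attribute access
    return mod

lazy_math = make_lazy("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(9.0))  # 3.0 -- `math` is imported lazily here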
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''gpt_bigcode'''
a__ = ['''past_key_values''']
a__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = vocab_size
snake_case_ : Any = n_positions
snake_case_ : Any = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Tuple = n_inner
snake_case_ : str = activation_function
snake_case_ : Union[str, Any] = resid_pdrop
snake_case_ : Optional[Any] = embd_pdrop
snake_case_ : Any = attn_pdrop
snake_case_ : List[Any] = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : int = scale_attn_weights
snake_case_ : Union[str, Any] = use_cache
snake_case_ : Dict = attention_softmax_in_fpaa
snake_case_ : Any = scale_attention_softmax_in_fpaa
snake_case_ : List[str] = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Any = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
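# A brief usage sketch, assuming the class above ships as transformers'
# GPTBigCodeConfig (identifiers are anonymized in this dump):
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2048)
print(config.hidden_size)  # 2048 -- attribute_map routes hidden_size to n_embd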
| 653 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = os.path.join(args.tf_model_dir ,"parameters.json" )
snake_case_ : Dict = json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(".pt" ):
snake_case_ : Dict = args.output + ".pt"
snake_case_ : Optional[Any] = OrderedDict()
with tf.device("/CPU:0" ):
snake_case_ : Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ : List[str] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ : Union[str, Any] = reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
snake_case_ : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
snake_case_ : Optional[int] = 8
snake_case_ : List[Any] = "model.sqout.%d.weight" % (player * 2) # feeds an nn.Sequential with Tanh, so 2 at a time
snake_case_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[Any] = torch.tensor(__magic_name__ )
elif key_name.startswith("model/moe" ):
snake_case_ : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
snake_case_ : List[Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
snake_case_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[str] = torch.tensor(__magic_name__ )
elif key_name.endswith("/softmlp/kernel" ):
snake_case_ : Tuple = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
snake_case_ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Optional[Any] = torch.tensor(__magic_name__ )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
snake_case_ : Tuple = key_name[-9:-7]
for i in range(16 ):
snake_case_ : str = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
snake_case_ : List[str] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ : Union[str, Any] = torch.tensor(__magic_name__ )
elif key_name.startswith("model/mlp" ):
snake_case_ : Dict = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
snake_case_ : Tuple = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
snake_case_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : int = torch.tensor(__magic_name__ )
elif key_name.endswith("/p1/bias" ):
snake_case_ : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
snake_case_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
snake_case_ : Union[str, Any] = torch.tensor(__magic_name__ )
elif key_name.endswith("/p2/kernel" ):
snake_case_ : str = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
snake_case_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[Any] = torch.tensor(__magic_name__ )
elif key_name.endswith("/p2/bias" ):
snake_case_ : List[str] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
snake_case_ : str = vnp.copy() # same because it is one dimensional
snake_case_ : str = torch.tensor(__magic_name__ )
elif key_name.startswith("model/ln" ):
snake_case_ : str = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case_ : Dict = "model.blocks.%d.feed_forward.norm.bias" % player
snake_case_ : List[Any] = vnp.copy() # same because it is one dimensional
snake_case_ : Optional[int] = torch.tensor(__magic_name__ )
elif key_name.endswith("/g" ):
snake_case_ : Tuple = "model.blocks.%d.feed_forward.norm.weight" % player
snake_case_ : Dict = vnp.copy() # same because it is one dimensional
snake_case_ : Dict = torch.tensor(__magic_name__ )
elif key_name.startswith("model/att" ):
snake_case_ : int = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
snake_case_ : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ : Union[str, Any] = state[:, 0, :, :]
snake_case_ : Dict = state[:, 1, :, :]
snake_case_ : List[str] = state[:, 2, :, :]
snake_case_ : int = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[str] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[str] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
snake_case_ : List[str] = torch.tensor(__magic_name__ )
snake_case_ : Optional[int] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
snake_case_ : Dict = torch.tensor(__magic_name__ )
snake_case_ : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
snake_case_ : Tuple = torch.tensor(__magic_name__ )
elif key_name.endswith("/o/kernel" ):
snake_case_ : Tuple = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
snake_case_ : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : int = torch.tensor(__magic_name__ )
elif key_name.startswith("model/an" ):
snake_case_ : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case_ : List[str] = "model.blocks.%d.self_attn.norm.bias" % player
snake_case_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
snake_case_ : Any = torch.tensor(__magic_name__ )
elif key_name.endswith("/g" ):
snake_case_ : Union[str, Any] = "model.blocks.%d.self_attn.norm.weight" % player
snake_case_ : str = vnp.copy() # same because it is one dimensional
snake_case_ : int = torch.tensor(__magic_name__ )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
snake_case_ : int = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
snake_case_ : Optional[Any] = "model.%s.weight" % nlayer
snake_case_ : Dict = vnp.copy() # same in embedded
snake_case_ : Union[str, Any] = torch.tensor(__magic_name__ )
if key_name.startswith("model/wte" ):
snake_case_ : Any = "lm_head.weight"
snake_case_ : int = vnp.copy() # same in embedded
snake_case_ : Dict = torch.tensor(__magic_name__ )
elif key_name.startswith("model/wob" ):
snake_case_ : Dict = "final_logits_bias"
snake_case_ : Optional[Any] = vnp.copy() # same in embedded
snake_case_ : Optional[int] = state.reshape((1, -1) )
snake_case_ : List[Any] = torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
snake_case_ : Optional[Any] = "model.last_project.weight"
snake_case_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : str = torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
snake_case_ : List[str] = "model.last_project.bias"
snake_case_ : List[Any] = vnp.copy() # same because it is one dimensional
snake_case_ : Optional[int] = torch.tensor(__magic_name__ )
torch.save(__magic_name__ ,args.output )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
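# A small sketch of the kernel-layout conversion used repeatedly above:
# TF dense kernels are stored as (in_features, out_features), while
# torch.nn.Linear stores weight as (out_features, in_features), hence the
# recurring vnp.transpose([1, 0]).copy() before torch.tensor(...).
import numpy as np
import torch

tf_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)       # (in=2, out=3)
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())   # (out=3, in=2)
print(pt_weight.shape)  # torch.Size([3, 2])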
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ )
snake_case_ : Optional[int] = {
"repo_id": str(__magic_name__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
json.dump(__magic_name__ ,__magic_name__ ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
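# A worked example of the rank bookkeeping above: with WORLD_SIZE=8 and
# 2 GPUs per node there are 8 // 2 = 4 nodes, and the process with global
# RANK=5 lives on node 5 // 2 = 2 with local index 5 % 2 = 1.
world_size, n_gpu_per_node, global_rank = 8, 2, 5
print(world_size // n_gpu_per_node, global_rank // n_gpu_per_node, global_rank % n_gpu_per_node)
# -> 4 2 1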
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
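# A worked check of the relation implemented above: exactly one argument is
# zero and the function solves for it, e.g. V = I * R gives 2 A * 3 ohm = 6 V.
current, resistance = 2.0, 3.0
print({"voltage": float(current * resistance)})  # {'voltage': 6.0}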
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str:
'''simple docstring'''
if not batched:
snake_case_ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : int = image.size
else:
snake_case_, snake_case_ : Any = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : List[Any] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Tuple = self.size["shortest_edge"]
snake_case_ : Dict = self.size["shortest_edge"]
else:
snake_case_ : List[str] = []
for image in image_inputs:
snake_case_, snake_case_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = YolosImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : int = YolosImageProcessingTester(self )
@property
def _A ( self :List[str] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''simple docstring'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
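# A small worked example of the shortest-edge resize rule exercised by
# get_expected_values above: the shorter side maps to shortest_edge and the
# longer side scales by the same factor.
w, h, shortest_edge = 300, 500, 18
if w < h:
    new_w, new_h = shortest_edge, int(shortest_edge * h / w)
else:
    new_w, new_h = int(shortest_edge * w / h), shortest_edge
print(new_w, new_h)  # 18 30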
| 653 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for line in lines:
snake_case_ : Any = re.sub(r"#.*" ,"" ,__magic_name__ ) # remove comments
if line:
filtered_lines.append(__magic_name__ )
snake_case_ : Optional[Any] = "\n".join(__magic_name__ )
# Make a hash from all this code
snake_case_ : List[str] = full_str.encode("utf-8" )
return shaaaa(__magic_name__ ).hexdigest()
# get importable module names and hash for caching
__lowerCamelCase : Dict = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowerCamelCase : Union[str, Any] = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowerCamelCase : str = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
__lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
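# A self-contained sketch of the hashing scheme above (the mangled name
# `shaaaa` presumably stands for hashlib's sha256): strip `#` comments, drop
# empty lines, join with newlines, and hash the UTF-8 bytes.
import re
from hashlib import sha256

lines = ["x = 1  # set x", "", "print(x)"]
filtered = [re.sub(r"#.*", "", line) for line in lines]
filtered = [line for line in filtered if line]
print(sha256("\n".join(filtered).encode("utf-8")).hexdigest()[:16])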
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be an integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
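# A compact cross-check of the rules above: for numbers 1..15 the combined
# Fizz/Buzz string (space-separated, as the function builds it) is:
print(" ".join(
    ("Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0)) or str(n)
    for n in range(1, 16)
) + " ")
# -> 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz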
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
__lowerCamelCase : Any = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowerCamelCase : str = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __UpperCAmelCase ( __magic_name__ )-> list[list[int]]:
"""simple docstring"""
snake_case_ : Optional[int] = []
for i in range(len(__magic_name__ ) ):
snake_case_ : Dict = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
snake_case_ : str = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__magic_name__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__magic_name__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__magic_name__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
snake_case_ : List[Any] = cells[i][j] == 1
if (alive and 2 <= neighbour_count <= 3) or (
not alive and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__magic_name__ )
return next_generation
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[Image.Image]:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for _ in range(__magic_name__ ):
# Create output image
snake_case_ : int = Image.new("RGB" ,(len(cells[0] ), len(__magic_name__ )) )
snake_case_ : List[str] = img.load()
# Save cells to image
for x in range(len(__magic_name__ ) ):
for y in range(len(cells[0] ) ):
snake_case_ : Union[str, Any] = 255 - cells[y][x] * 255
snake_case_ : Tuple = (colour, colour, colour)
# Save image
images.append(__magic_name__ )
snake_case_ : Any = new_generation(__magic_name__ )
return images
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
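# A quick check of the update rule above on the 3x3 blinker: a vertical bar
# becomes a horizontal bar after one generation (a period-2 oscillator).
# The compact re-statement below uses the same neighbour-count rules.
def step(cells):
    out = []
    for i in range(len(cells)):
        row = []
        for j in range(len(cells[i])):
            cnt = sum(
                cells[a][b]
                for a in range(max(0, i - 1), min(len(cells), i + 2))
                for b in range(max(0, j - 1), min(len(cells[i]), j + 2))
                if (a, b) != (i, j)
            )
            row.append(1 if cnt == 3 or (cells[i][j] and cnt == 2) else 0)
        out.append(row)
    return out

print(step([[0, 1, 0], [0, 1, 0], [0, 1, 0]]))
# -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]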
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int:
"""simple docstring"""
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : str = load_dataset("glue" ,"mrpc" )
def tokenize_function(__magic_name__ ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Any = datasets.map(
__magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect
snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__magic_name__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Tuple = 16
elif accelerator.mixed_precision != "no":
snake_case_ : str = 8
else:
snake_case_ : Optional[Any] = None
return tokenizer.pad(
__magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)
# Instantiate dataloaders.
snake_case_ : str = DataLoader(
tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
snake_case_ : Optional[Any] = DataLoader(
tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
snake_case_ : List[str] = 2
# Initialize accelerator
snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : List[str] = config["lr"]
snake_case_ : Dict = int(config["num_epochs"] )
snake_case_ : Dict = int(config["seed"] )
snake_case_ : Optional[int] = int(config["batch_size"] )
snake_case_ : Dict = evaluate.load("glue" ,"mrpc" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__magic_name__ )
def inner_training_loop(__magic_name__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__magic_name__ )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ )
snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ )
# Instantiate scheduler
snake_case_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare(
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : int = model(**__magic_name__ )
snake_case_ : Any = outputs.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**__magic_name__ )
snake_case_ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__magic_name__ ,references=__magic_name__ ,)
snake_case_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose "
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
snake_case_ : str = parser.parse_args()
snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__magic_name__ ,__magic_name__ )
if __name__ == "__main__":
main()
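# A minimal sketch of the decorator pattern above: accelerate's
# find_executable_batch_size retries the wrapped function with a halved
# batch size whenever it hits a CUDA out-of-memory error. The toy
# re-implementation below illustrates the idea; it is not the real
# accelerate internals.
def find_executable_batch_size_toy(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            bs = starting_batch_size
            while bs > 0:
                try:
                    return fn(bs)
                except RuntimeError as e:  # the real impl inspects OOM messages
                    if "out of memory" not in str(e):
                        raise
                    bs //= 2
            raise RuntimeError("No executable batch size found")
        return wrapper
    return decorator

@find_executable_batch_size_toy(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory (simulated)")
    return batch_size

print(train())  # 16 -- first size that "fits" after 64 -> 32 -> 16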
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : int = ""
for i in table:
res += inp[i - 1]
return res
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return data[1:] + data[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = ""
for i in range(len(__magic_name__ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
"""simple docstring"""
snake_case_ : str = int("0b" + data[0] + data[-1] ,2 )
snake_case_ : List[str] = int("0b" + data[1:3] ,2 )
return bin(s[row][col] )[2:]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : List[str] = message[:4]
snake_case_ : Optional[Any] = message[4:]
snake_case_ : Optional[int] = apply_table(__magic_name__ ,__magic_name__ )
snake_case_ : Union[str, Any] = xor(__magic_name__ ,__magic_name__ )
snake_case_ : Optional[Any] = apply_sbox(__magic_name__ ,temp[:4] ) # noqa: E741
snake_case_ : Dict = apply_sbox(__magic_name__ ,temp[4:] )
snake_case_ : str = "0" * (2 - len(__magic_name__ )) + l # noqa: E741
snake_case_ : Dict = "0" * (2 - len(__magic_name__ )) + r
snake_case_ : int = apply_table(l + r ,__magic_name__ )
snake_case_ : Dict = xor(__magic_name__ ,__magic_name__ )
return temp + right
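# A descriptive note on the Feistel round implemented by the function above: the
# 4-bit right half is expanded to 8 bits, XORed with the round key, compressed
# back to 4 bits by the two S-boxes (each output zero-padded to 2 bits), permuted,
# and finally XORed into the left half; the swap between the two rounds happens
# in the driver code below.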
if __name__ == "__main__":
__lowerCamelCase : Any = input('''Enter 10 bit key: ''')
__lowerCamelCase : List[Any] = input('''Enter 8 bit message: ''')
__lowerCamelCase : Union[str, Any] = [6, 3, 7, 4, 8, 5, 10, 9]
__lowerCamelCase : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__lowerCamelCase : int = [2, 4, 3, 1]
__lowerCamelCase : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
__lowerCamelCase : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
__lowerCamelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
__lowerCamelCase : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__lowerCamelCase : Tuple = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__lowerCamelCase : List[Any] = apply_table(key, paa_table)
__lowerCamelCase : str = temp[:5]
__lowerCamelCase : Dict = temp[5:]
__lowerCamelCase : Optional[int] = left_shift(left)
__lowerCamelCase : Optional[int] = left_shift(right)
__lowerCamelCase : Any = apply_table(left + right, pa_table)
__lowerCamelCase : str = left_shift(left)
__lowerCamelCase : Optional[Any] = left_shift(right)
__lowerCamelCase : str = left_shift(left)
__lowerCamelCase : str = left_shift(right)
__lowerCamelCase : Any = apply_table(left + right, pa_table)
# encryption
__lowerCamelCase : Dict = apply_table(message, IP)
__lowerCamelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__lowerCamelCase : Optional[Any] = temp[4:] + temp[:4]
__lowerCamelCase : List[str] = function(expansion, sa, sa, keya, temp)
__lowerCamelCase : Optional[int] = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
__lowerCamelCase : Optional[int] = apply_table(CT, IP)
__lowerCamelCase : Tuple = function(expansion, sa, sa, keya, temp)
__lowerCamelCase : Optional[Any] = temp[4:] + temp[:4]
__lowerCamelCase : str = function(expansion, sa, sa, keya, temp)
__lowerCamelCase : List[Any] = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
"""simple docstring"""
a__ = '''facebook/bart-large-mnli'''
a__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
a__ = '''text_classifier'''
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ['''text''', ['''text''']]
a__ = ['''text''']
def _A ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().setup()
snake_case_ : Optional[int] = self.model.config
snake_case_ : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
snake_case_ : Union[str, Any] = int(lowerCAmelCase__ )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
snake_case_ : Tuple = labels
return self.pre_processor(
[text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = outputs.logits
snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
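# A hedged usage sketch (PipelineTool instances are callable; the text and
# labels here are illustrative):
#
#   classifier = A_()
#   classifier("The weather is lovely today." , labels=["positive", "negative"])
#   # expected to return the most likely label, e.g. "positive"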
| 653 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A_ (metaclass=a_ ):
"""simple docstring"""
a__ = ['''torch''', '''torchsde''']
def __init__( self :List[str] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _A ( cls :List[Any] , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _A ( cls :str , *lowerCAmelCase__ :int , **lowerCAmelCase__ :Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
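# A descriptive note: importing this dummy module always succeeds, but any use of
# the placeholder class calls `requires_backends`, which raises an error naming
# the missing "torch" and "torchsde" backends, e.g.:
#
#   scheduler = A_()  # fails fast with an informative import error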
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ViTFeatureExtractor''']
__lowerCamelCase : Any = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
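# A minimal sketch of what the `_LazyModule` registration above enables (the
# heavy submodules are only imported on first attribute access):
#
#   import transformers.models.vit as vit  # cheap: no torch/tf/flax import yet
#   vit.ViTConfig                          # triggers the real submodule import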
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[int]]:
"""simple docstring"""
snake_case_ : list[list[int]] = []
snake_case_ : list[int] = []
snake_case_ : Dict = 0
snake_case_ : str = sum(__magic_name__ )
create_state_space_tree(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return result
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)-> None:
"""simple docstring"""
if sum(__magic_name__ ) > max_sum or (remaining_nums_sum + sum(__magic_name__ )) < max_sum:
return
if sum(__magic_name__ ) == max_sum:
result.append(__magic_name__ )
return
for index in range(__magic_name__ ,len(__magic_name__ ) ):
create_state_space_tree(
__magic_name__ ,__magic_name__ ,index + 1 ,[*path, nums[index]] ,__magic_name__ ,remaining_nums_sum - nums[index] ,)
__lowerCamelCase : Tuple = [3, 34, 4, 12, 5, 2]
__lowerCamelCase : List[Any] = 9
__lowerCamelCase : int = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
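# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the subsets found are
# [3, 4, 2] and [4, 5], so the line above prints: [3, 4, 2] [4, 5]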
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Union[str, Any] = text_seq_length
snake_case_ : Dict = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : List[Any] = initializer_range
snake_case_ : Union[str, Any] = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Tuple = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : List[str] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : str = text_seq_length
snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
snake_case_ : str = self.text_seq_length + self.image_seq_length
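# With the defaults above (image_size=4, patch_size=2, text_seq_length=7):
# image_seq_length = (4 // 2) ** 2 + 1 = 5 and seq_length = 7 + 5 = 12.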
def _A ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : Optional[Any] = bbox[i, j, 3]
snake_case_ : Any = bbox[i, j, 1]
snake_case_ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : str = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Union[str, Any] = t
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Dict = None
if self.use_input_mask:
snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.num_labels
snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : List[Any] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Optional[Any] = config_and_inputs
snake_case_ : Tuple = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = False
a__ = False
a__ = False
a__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
return True
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
snake_case_ : Tuple = LayoutLMvaModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
snake_case_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
snake_case_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def _A ( self :Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :int ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _A ( self :int ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def _A ( self :int ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def _A ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([[1, 2]] )
snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ : Any = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__="attention" )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
snake_case_ : Union[str, Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
snake_case_ : Tuple = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
snake_case_ : Union[str, Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__=False )-> Dict:
"""simple docstring"""
if split_mlp_wi:
snake_case_ : Tuple = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
snake_case_ : List[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
snake_case_ : List[str] = (wi_a, wi_a)
else:
snake_case_ : List[str] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
snake_case_ : Optional[int] = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def __UpperCAmelCase ( __magic_name__ ,*, __magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Tuple = traverse_util.flatten_dict(variables["target"] )
snake_case_ : Dict = {"/".join(__magic_name__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case_ : Optional[int] = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" ,__magic_name__ )
snake_case_ : Any = collections.OrderedDict()
# Shared embeddings.
snake_case_ : Tuple = old["token_embedder/embedding"]
# Encoder.
for i in range(__magic_name__ ):
# Block i, layer 0 (Self Attention).
snake_case_ : int = tax_layer_norm_lookup(__magic_name__ ,__magic_name__ ,"encoder" ,"pre_attention_layer_norm" )
snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = tax_attention_lookup(__magic_name__ ,__magic_name__ ,"encoder" ,"attention" )
snake_case_ : List[str] = layer_norm
snake_case_ : Tuple = k.T
snake_case_ : int = o.T
snake_case_ : List[Any] = q.T
snake_case_ : Optional[Any] = v.T
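# Note on the `.T` transposes throughout this converter: Flax/T5X stores dense
# kernels as (in_features, out_features), whereas torch.nn.Linear weights are
# (out_features, in_features).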
# Block i, layer 1 (MLP).
snake_case_ : int = tax_layer_norm_lookup(__magic_name__ ,__magic_name__ ,"encoder" ,"pre_mlp_layer_norm" )
snake_case_, snake_case_ : Optional[Any] = tax_mlp_lookup(__magic_name__ ,__magic_name__ ,"encoder" ,__magic_name__ )
snake_case_ : Dict = layer_norm
if split_mlp_wi:
snake_case_ : str = wi[0].T
snake_case_ : Optional[int] = wi[1].T
else:
snake_case_ : Union[str, Any] = wi.T
snake_case_ : Optional[Any] = wo.T
snake_case_ : Union[str, Any] = old[
"encoder/relpos_bias/rel_embedding"
].T
snake_case_ : Optional[Any] = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(__magic_name__ ):
# Block i, layer 0 (Self Attention).
snake_case_ : List[Any] = tax_layer_norm_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,"pre_self_attention_layer_norm" )
snake_case_, snake_case_, snake_case_, snake_case_ : Union[str, Any] = tax_attention_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,"self_attention" )
snake_case_ : str = layer_norm
snake_case_ : Tuple = k.T
snake_case_ : List[str] = o.T
snake_case_ : Any = q.T
snake_case_ : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
snake_case_ : int = tax_layer_norm_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,"pre_cross_attention_layer_norm" )
snake_case_, snake_case_, snake_case_, snake_case_ : Dict = tax_attention_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,"encoder_decoder_attention" )
snake_case_ : List[str] = layer_norm
snake_case_ : Any = k.T
snake_case_ : Dict = o.T
snake_case_ : Dict = q.T
snake_case_ : List[Any] = v.T
# Block i, layer 2 (MLP).
snake_case_ : Tuple = tax_layer_norm_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,"pre_mlp_layer_norm" )
snake_case_, snake_case_ : Optional[int] = tax_mlp_lookup(__magic_name__ ,__magic_name__ ,"decoder" ,__magic_name__ )
snake_case_ : Union[str, Any] = layer_norm
if split_mlp_wi:
snake_case_ : List[str] = wi[0].T
snake_case_ : Optional[int] = wi[1].T
else:
snake_case_ : List[str] = wi.T
snake_case_ : str = wo.T
snake_case_ : Dict = old["decoder/decoder_norm/scale"]
snake_case_ : Union[str, Any] = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case_ : str = old["decoder/logits_dense/kernel"].T
return new
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Any:
"""simple docstring"""
snake_case_ : Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case_ : Any = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case_ : Union[str, Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
snake_case_ : Union[str, Any] = state_dict["shared.weight"]
return state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[Any] = checkpoints.load_tax_checkpoint(__magic_name__ )
snake_case_ : Any = convert_tax_to_pytorch(__magic_name__ ,num_layers=config.num_layers ,is_encoder_only=__magic_name__ )
snake_case_ : List[str] = make_state_dict(__magic_name__ ,__magic_name__ )
model.load_state_dict(__magic_name__ ,strict=__magic_name__ )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = False )-> Optional[int]:
"""simple docstring"""
snake_case_ : str = TaConfig.from_json_file(__magic_name__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case_ : Union[str, Any] = TaEncoderModel(__magic_name__ )
else:
snake_case_ : Optional[Any] = TaForConditionalGeneration(__magic_name__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__magic_name__ )
# Verify that we can load the checkpoint.
model.from_pretrained(__magic_name__ )
print("Done" )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
__lowerCamelCase : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
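# A hedged invocation sketch using the flags defined above (the script name and
# paths are illustrative):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump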
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
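# A minimal sketch of the API under test (assumes a Spark session plus the
# joblib-spark backend, per the decorators above; `add_one` is illustrative):
#
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#
#   def add_one(i):
#       return i + 1
#
#   with parallel_backend("spark"):
#       assert map_nested(add_one, [1, 2], num_proc=2) == [2, 3]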
| 653 | 1 |