import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue_with_xla_spawn(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers apart each sparse layer is placed.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers apart each sparse layer is placed.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
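# Usage sketch (our illustration, not part of the original file): constructing the
# config and checking the derived sparse-layer stride. Assumes `transformers` is
# installed, so the public class is imported rather than the relative path above.
#
#   from transformers import SwitchTransformersConfig
#   config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#   assert config.encoder_sparse_step == 4  # every 4th encoder layer hosts experts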
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger

HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig

ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
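# Usage sketch (our illustration): the ONNX input spec switches on the task name,
# adding a "choice" axis only for multiple-choice heads. Assumes `transformers`
# is installed so the ONNX export machinery behind OnnxConfig is available.
#
#   config = AlbertConfig()
#   onnx_config = AlbertOnnxConfig(config, task="multiple-choice")
#   # onnx_config.inputs["input_ids"] -> {0: "batch", 1: "choice", 2: "sequence"}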
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
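# Usage sketch (our illustration): the full preprocess pipeline on one PIL image.
# Assumes Pillow is installed; the shape comment reflects the default 224px resize
# and center crop with channel-first output.
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)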
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
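# Worked example (our addition): this is Kadane's algorithm — at each position the
# best sum either extends the previous run (ans + num) or restarts at num. For
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] the answer is 6, from the slice [4, -1, 2, 1]:
#
#   assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6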
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU: the activation branch gates the linear branch
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm that only scales and doesn't shift (RMSNorm),
        # always accumulated in float32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
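# Shape sketch (our illustration, not part of the original file): wiring a tiny
# decoder end to end. The encoder outputs and masks below are random stand-ins;
# in the real pipeline they come from the note/text encoders.
#
#   decoder = T5FilmDecoder(input_dims=8, targets_length=16, d_model=32,
#                           num_layers=1, num_heads=2, d_kv=16, d_ff=64)
#   tokens = torch.randn(2, 16, 8)
#   enc = [(torch.randn(2, 5, 32), torch.ones(2, 5))]
#   spec = decoder(encodings_and_masks=enc, decoder_input_tokens=tokens,
#                  decoder_noise_time=torch.rand(2))
#   # spec.shape == (2, 16, 8): one FiLM-conditioned frame per input frame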
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
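# Why logarithms (our note): comparing x * log10(a) ranks a**x without ever
# evaluating it, which would otherwise mean million-digit integers here.
# Small check: is 3**7 bigger than 2**11?
#
#   11 * log10(2) ≈ 3.31  vs  7 * log10(3) ≈ 3.34, so 3**7 (= 2187) wins
#   over 2**11 (= 2048) — no exponentiation needed.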
if __name__ == "__main__":
    print(solution())
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # The reference dict below is recorded tokenizer output and is kept verbatim
        # from the source; the line continuation binds it to `expected_encoding`.
        # fmt: off
        expected_encoding = \
a_ ={"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
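# Worked example (our addition): the system x ≡ 1 (mod 5), x ≡ 2 (mod 7) has the
# unique solution 16 modulo 35, and both constructions agree:
#
#   assert chinese_remainder_theorem(5, 1, 7, 2) == 16
#   assert chinese_remainder_theorem2(5, 1, 7, 2) == 16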
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
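# Usage sketch (our illustration): the processor fans text out to the tokenizer and
# images out to the image processor, merging both into one batch. The checkpoint
# name is an assumption on our part; any AltCLIP checkpoint would do.
#
#   from transformers import AltCLIPProcessor
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch holds input_ids / attention_mask plus pixel_values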
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # either skip the current item...
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # ...or take it, if it still fits
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
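# Worked example (our addition): three items with weights [2, 4, 3] and values
# [3, 5, 4] under capacity 5 — the best choice is items 0 and 2 (weight 2 + 3):
#
#   assert knapsack([2, 4, 3], [3, 5, 4], 3, 5, 0) == 7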
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
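# Worked example (our addition): the XOR of two integers is negative exactly when
# their sign bits differ, so no branching or abs() is needed:
#
#   assert different_signs(1, -1) is True
#   assert different_signs(1, 1) is False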
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =HfArgumentParser(lowercase__ )
a_ =parser.parse_args_into_dataclasses()[0]
a_ =TensorFlowBenchmark(args=lowercase__ )
try:
a_ =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a_ ="Arg --no_{0} is no longer used, please use --no-{0} instead."
a_ =" ".join(str(lowercase__ ).split(" " )[:-1] )
a_ =""
a_ =eval(str(lowercase__ ).split(" " )[-1] )
a_ =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
a_ =full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
if __name__ == "__main__":
main()
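# Hypothetical helper illustrating the rename the error message above asks the user
# to perform: deprecated `--no_*` spellings are rewritten to `--no-*` before parsing.
def translate_deprecated_flags(argv: list) -> list:
    translated = []
    for arg in argv:
        if arg.startswith("--no_"):
            translated.append("--no-" + arg[len("--no_"):])  # "--no_cuda" -> "--no-cuda"
        else:
            translated.append(arg)
    return translated

assert translate_deprecated_flags(["--no_cuda", "--training"]) == ["--no-cuda", "--training"]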
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =os.path.dirname(os.path.realpath(lowercase__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
for word in [sum(ord(x) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
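# Worked example of the scoring used above: each uppercase letter maps to its
# alphabet position (ord(letter) - 64), and the word value is checked against the
# triangular numbers. "SKY" scores 19 + 11 + 25 = 55 = 10 * 11 / 2, a triangle word.
def word_value(word: str) -> int:
    return sum(ord(letter) - 64 for letter in word.upper())

assert word_value("SKY") == 55
assert word_value("SKY") in {n * (n + 1) // 2 for n in range(1, 101)}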
| 41
| 0
|
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "facebook/bart-large-mnli"
__magic_name__ : Optional[Any] = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__magic_name__ : Dict = "text_classifier"
__magic_name__ : str = AutoTokenizer
__magic_name__ : List[str] = AutoModelForSequenceClassification
__magic_name__ : str = ["text", ["text"]]
__magic_name__ : List[Any] = ["text"]
def lowercase_ ( self) -> int:
"""simple docstring"""
super().setup()
a_ =self.model.config
a_ =-1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail"):
a_ =int(lowerCAmelCase_)
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =labels
return self.pre_processor(
[text] * len(lowerCAmelCase_) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =outputs.logits
a_ =torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
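# Toy illustration of the decode step above: one row of logits per candidate label
# (text paired with "This example is <label>"), and the row whose entailment column
# scores highest wins. The column index 2 mirrors the hard-coded index used above.
import torch

example_labels = ["positive", "negative"]
example_logits = torch.tensor([[0.1, 0.2, 2.5],   # hypothetical scores
                               [1.0, 0.4, 0.3]])
assert example_labels[torch.argmax(example_logits[:, 2]).item()] == "positive"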
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
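# Sketch of the key fixup performed above: strip the compiled-model prefix
# ("_orig_mod.") and substitute layer-name fragments via a subset of the
# `new_layer_name_dict` mapping defined at the top of this file.
def remap_state_dict_keys(state_dict: dict, prefix: str = "_orig_mod.") -> dict:
    name_map = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
    remapped = {}
    for key, value in state_dict.items():
        new_key = key[len(prefix):] if key.startswith(prefix) else key
        for old, new in name_map.items():
            new_key = new_key.replace(old, new)
        remapped[new_key] = value
    return remapped

assert remap_state_dict_keys({"_orig_mod.transformer.h.0.c_attn.weight": 0}) == {
    "layers.0.att_proj.weight": 0
}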
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
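# The parity check above accepts the conversion when outputs agree elementwise
# within an absolute tolerance of 1e-3; a minimal numeric example of that criterion:
import torch

reference = torch.tensor([1.0000, 2.0000])
candidate = torch.tensor([1.0004, 1.9998])
assert (candidate - reference).abs().max().item() <= 1e-3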
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import os
import sys
lowercase = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowercase = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoConfig.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoModel.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCAmelCase_ ( *lowercase__ , **lowercase__ ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*lowercase__ , **lowercase__ )
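# Usage sketch for the wrappers above (kept inside a function because it downloads
# weights; "bert-base-uncased" is only an example hub identifier):
def _demo_masked_lm():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
    inputs = tokenizer("Paris is the capital of [MASK].", return_tensors="pt")
    return model(**inputs).logits.shape  # (batch, sequence_length, vocab_size)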
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
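# Worked check of the candidate the first loop above finds: 9327 concatenated with
# 2 * 9327 = 18654 gives "932718654", which uses each digit 1-9 exactly once, and
# 100002 * 9327 reproduces exactly that concatenation.
assert 100002 * 9327 == 932718654
assert set(str(932718654)) == set("123456789") and len(str(932718654)) == 9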
| 41
| 0
|
'''simple docstring'''
import sys
import turtle
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
triangle(lowercase__ , get_mid(lowercase__ , lowercase__ ) , get_mid(lowercase__ , lowercase__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
lowercase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
lowercase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
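# Side note on the recursion above: every call draws one triangle and, unless
# depth == 0, spawns three children, so a run at depth d draws sum(3**k, k=0..d).
def triangles_drawn(depth: int) -> int:
    return sum(3**level for level in range(depth + 1))

assert triangles_drawn(0) == 1
assert triangles_drawn(3) == 40  # 1 + 3 + 9 + 27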
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
"""simple docstring"""
a_ =4
a_ =3_2
a_ =(3_2, 3_2)
a_ =torch.manual_seed(0)
a_ =torch.device(lowerCAmelCase_)
a_ =(batch_size, num_channels) + sizes
a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
a_ ={"hidden_states": hidden_states}
if include_temb:
a_ =1_2_8
a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
a_ =torch.manual_seed(1)
a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a_ =3_2
if self.block_type == "mid":
init_dict.pop("out_channels")
a_ =self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
a_ =unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
self.assertEqual(output.shape , self.output_shape)
a_ =output[0, -1, -3:, -3:]
a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
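# Sketch of the spatial contract the output-shape property above encodes for a
# 4x32x32x32 reference input: "down" halves, "mid" keeps, "up" doubles resolution.
def expected_output_shape(block_type: str) -> tuple:
    shapes = {"down": (4, 32, 16, 16), "mid": (4, 32, 32, 32), "up": (4, 32, 64, 64)}
    if block_type not in shapes:
        raise ValueError(f"'{block_type}' is not a supported block_type.")
    return shapes[block_type]

assert expected_output_shape("up") == (4, 32, 64, 64)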
| 41
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Dict = (DDPMScheduler,)
def lowercase_ ( self , **lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ ={
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**lowerCAmelCase_)
return config
def lowercase_ ( self) -> Dict:
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0_0_9_7_9)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.0_2)) < 1e-5
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =len(lowerCAmelCase_)
a_ =self.dummy_model()
a_ =self.dummy_sample_deter
a_ =torch.manual_seed(0)
for t in reversed(range(lowerCAmelCase_)):
# 1. predict noise residual
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
# 2. predict previous mean of sample x_t-1
a_ =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
a_ =pred_prev_sample
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1e-3
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config(prediction_type="v_prediction")
a_ =scheduler_class(**lowerCAmelCase_)
a_ =len(lowerCAmelCase_)
a_ =self.dummy_model()
a_ =self.dummy_sample_deter
a_ =torch.manual_seed(0)
for t in reversed(range(lowerCAmelCase_)):
# 1. predict noise residual
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
# 2. predict previous mean of sample x_t-1
a_ =scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
a_ =pred_prev_sample
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1e-3
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =[1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
a_ =scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_):
if i == len(lowerCAmelCase_) - 1:
a_ =-1
else:
a_ =timesteps[i + 1]
a_ =scheduler.previous_timestep(lowerCAmelCase_)
a_ =prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =[1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =[1_0_0, 8_7, 5_0, 1, 0]
a_ =len(lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =[scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg=f"""`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
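# Compact sketch of the reverse-diffusion loop these tests exercise, using the
# public DDPMScheduler API with a zero tensor standing in for the model output:
example_scheduler = DDPMScheduler(num_train_timesteps=1_0_0_0)
example_scheduler.set_timesteps(num_inference_steps=1_0)
example_sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
for example_t in example_scheduler.timesteps:
    example_noise_pred = torch.zeros_like(example_sample)  # stand-in for model(sample, t)
    example_sample = example_scheduler.step(example_noise_pred, example_t, example_sample).prev_sample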
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[float("inf" )] * vertex_count
a_ =0.0
for _ in range(vertex_count - 1 ):
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
a_ =distance[u] + w
a_ =check_negative_cycle(lowercase__ , lowercase__ , lowercase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
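# Self-contained usage sketch (assuming the solver is exposed as `bellman_ford`,
# the name the driver code below calls): a 3-vertex graph whose shortest path
# 0 -> 1 -> 2 costs 3, beating the direct 0 -> 2 edge of weight 5.
example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": 1},
    {"src": 0, "dst": 2, "weight": 5},
]
assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 2.0, 3.0]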
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , )
def lowercase_ ( self , lowerCAmelCase_ = "auto") -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a_ =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase_)
@torch.no_grad()
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = 5_1_2 , lowerCAmelCase_ = 5_1_2 , lowerCAmelCase_ = 5_0 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =1
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =len(lowerCAmelCase_)
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase_)}""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase_ , lowerCAmelCase_) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCAmelCase_)}.""")
# get prompt text embeddings
a_ =self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
a_ =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
a_ =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
a_ =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
a_ =self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
a_ , a_ , a_ =text_embeddings.shape
a_ =text_embeddings.repeat(1 , lowerCAmelCase_ , 1)
a_ =text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a_ =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a_ =4_2
if negative_prompt is None:
a_ =[""]
elif type(lowerCAmelCase_) is not type(lowerCAmelCase_):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase_)} !="""
f""" {type(lowerCAmelCase_)}.""")
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =[negative_prompt]
elif batch_size != len(lowerCAmelCase_):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase_)}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`.")
else:
a_ =negative_prompt
a_ =text_input_ids.shape[-1]
a_ =self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="pt" , )
a_ =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a_ =uncond_embeddings.shape[1]
a_ =uncond_embeddings.repeat(lowerCAmelCase_ , lowerCAmelCase_ , 1)
a_ =uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a_ =torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a_ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
a_ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
a_ =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
a_ =torch.randn(
lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_).to(self.device)
a_ =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_).to(
self.device)
else:
a_ =torch.randn(
lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_)
a_ =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
a_ =latents_reference.to(self.device)
a_ =latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
a_ =(latents_shape[3] - latents_shape_reference[3]) // 2
a_ =(latents_shape[2] - latents_shape_reference[2]) // 2
a_ =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
a_ =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
a_ =0 if dx < 0 else dx
a_ =0 if dy < 0 else dy
a_ =max(-dx , 0)
a_ =max(-dy , 0)
a_ =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
a_ =self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
a_ =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a_ ="eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
a_ ={}
if accepts_eta:
a_ =eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase_)):
# expand the latents if we are doing classifier free guidance
a_ =torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_)
# predict the noise residual
a_ =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_).sample
# perform guidance
if do_classifier_free_guidance:
a_ , a_ =noise_pred.chunk(2)
a_ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
a_ =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
a_ =1 / 0.1_8_2_1_5 * latents
a_ =self.vae.decode(lowerCAmelCase_).sample
a_ =(image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a_ =image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if self.safety_checker is not None:
a_ =self.feature_extractor(self.numpy_to_pil(lowerCAmelCase_) , return_tensors="pt").to(
self.device)
a_ , a_ =self.safety_checker(
images=lowerCAmelCase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
a_ =None
if output_type == "pil":
a_ =self.numpy_to_pil(lowerCAmelCase_)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_)
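# Sketch of the classifier-free guidance update applied in the denoising loop above
# (guidance weight w from eq. (2) of the Imagen paper): blend the unconditional and
# text-conditioned noise predictions.
import torch

def classifier_free_guidance(noise_uncond, noise_text, guidance_scale):
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

guided = classifier_free_guidance(torch.zeros(1, 4), torch.ones(1, 4), 7.5)
assert torch.allclose(guided, torch.full((1, 4), 7.5))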
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =LxmertConfig.from_json_file(lowercase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
a_ =LxmertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import operator as op
lowercase = '''scaler.pt'''
lowercase = '''pytorch_model'''
lowercase = '''random_states'''
lowercase = '''optimizer'''
lowercase = '''scheduler'''
lowercase = '''pytorch_model.bin'''
lowercase = '''pytorch_model.bin.index.json'''
lowercase = '''model.safetensors'''
lowercase = '''model.safetensors.index.json'''
lowercase = '''1.10.2'''
lowercase = '''py38'''
lowercase = '''4.17.0'''
lowercase = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
lowercase = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
lowercase = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
lowercase = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
lowercase = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
lowercase = '''2.0.1'''
lowercase = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
lowercase = ['''default''', '''reduce-overhead''', '''max-autotune''']
lowercase = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowercase = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
lowercase = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
lowercase = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =json.loads(f.read() )
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ =b
a_ =idx
for wd in b:
a_ =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
"""simple docstring"""
a_ =text.replace(" " , "<SP>")
a_ =text.replace(" " , "<SP>")
a_ =text.replace("\r\n" , "<BR>")
a_ =text.replace("\n" , "<BR>")
a_ =text.replace("\r" , "<BR>")
a_ =text.replace("\t" , "<TAB>")
a_ =text.replace("—" , "ー")
a_ =text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
if clean:
a_ =self.clean_text(lowerCAmelCase_)
def check_simbol(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
a_ =(int(e[0]) << 8) + int(e[1])
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
a_ =0
a_ =[]
while pos < len(lowerCAmelCase_):
a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ =[] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
a_ =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_) > 2:
a_ =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase_) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
result.append(lowerCAmelCase_)
a_ =e
else:
a_ =pos + 1
a_ =text[pos:end]
if check_simbol(lowerCAmelCase_):
result.append("<KIGOU>")
elif checkuae(lowerCAmelCase_):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ =end
return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
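# Round-trip sketch of the byte fallback implemented above: characters with no
# vocabulary entry are emitted as "<|byte%d|>" tokens and reassembled on decode
# with the same word[6:-2] slicing used in the class.
def bytes_to_tokens(text: str) -> list:
    return ["<|byte%d|>" % b for b in text.encode("utf-8")]

def tokens_to_text(tokens: list) -> str:
    return bytearray(int(token[6:-2]) for token in tokens).decode("utf-8", errors="replace")

assert tokens_to_text(bytes_to_tokens("猫")) == "猫"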
| 41
| 0
|
from __future__ import annotations
lowercase = 1.6_0_2_1e-1_9 # units = C
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
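# Worked example of the relation the solver above rearranges: sigma = n * e * mu.
ELECTRON_CHARGE_C = 1.6021e-19  # same constant as defined at the top of this file
example_sigma = 1e20 * ELECTRON_CHARGE_C * 0.01  # n = 1e20, mobility = 0.01
assert abs(example_sigma - 0.16021) < 1e-6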
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
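# Weight layout differences handled below: TF conv kernels are (H, W, in, out) vs
# PyTorch's (out, in, H, W); TF depthwise kernels are (H, W, in, depth_multiplier)
# vs PyTorch's (in * depth_multiplier, 1, H, W) (identical when depth_multiplier is 1);
# TF dense kernels are (in, out) vs PyTorch's (out, in), hence the transpose.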
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
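# A minimal, self-contained sketch (not part of the conversion itself) of the
# layout change applied above for a standard convolution; the shapes here are
# made up purely for illustration.
def _kernel_layout_demo():
    tf_kernel = torch.randn(3, 3, 32, 64)      # hypothetical TF kernel: (H, W, in, out)
    pt_kernel = tf_kernel.permute(3, 2, 0, 1)  # PyTorch layout: (out, in, H, W)
    assert pt_kernel.shape == (64, 32, 3, 3)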
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
import numpy as np
import qiskit
def UpperCAmelCase_ ( lowercase__ = 8 , lowercase__ = None ):
'''simple docstring'''
a_ =np.random.default_rng(seed=lowercase__ )
# On average only about half of Alice's and Bob's random bases will match,
# so we prepare several times more qubits than the desired key length.
a_ =6 * key_len
# Measurement basis for Alice's qubits.
a_ =rng.integers(2 , size=lowercase__ )
# The set of states Alice will prepare.
a_ =rng.integers(2 , size=lowercase__ )
# Measurement basis for Bob's qubits.
a_ =rng.integers(2 , size=lowercase__ )
# Quantum Circuit to simulate BB84
a_ =qiskit.QuantumCircuit(lowercase__ , name="BB84" )
# Alice prepares her qubits: apply X if her state bit is 1, then H if her basis bit is 1 (diagonal basis).
for index, _ in enumerate(lowercase__ ):
if alice_state[index] == 1:
bbaa_circ.x(lowercase__ )
if alice_basis[index] == 1:
bbaa_circ.h(lowercase__ )
bbaa_circ.barrier()
# Bob measures each received qubit, applying H first whenever his basis bit is 1 (diagonal basis).
for index, _ in enumerate(lowercase__ ):
if bob_basis[index] == 1:
bbaa_circ.h(lowercase__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
a_ =qiskit.Aer.get_backend("aer_simulator" )
# A single shot suffices: with a fixed simulator seed, repeated shots
# would all produce the same measurement outcome, hence the same key.
a_ =qiskit.execute(lowercase__ , lowercase__ , shots=1 , seed_simulator=lowercase__ )
# Returns the result of measurement.
a_ =job.result().get_counts(lowercase__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
a_ ="".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
lowercase__ , lowercase__ , lowercase__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
a_ =gen_key[:key_len] if len(lowercase__ ) >= key_len else gen_key.ljust(lowercase__ , "0" )
return key
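# Illustrative sanity check (not part of the protocol): with uniformly random
# bases, Alice's and Bob's basis bits agree ~50% of the time, which is why the
# circuit above prepares 6 * key_len qubits instead of exactly key_len.
def _expected_sifted_fraction(num_qubits: int = 10_000, seed: int = 0) -> float:
    rng = np.random.default_rng(seed=seed)
    alice_basis = rng.integers(2, size=num_qubits)
    bob_basis = rng.integers(2, size=num_qubits)
    return float(np.mean(alice_basis == bob_basis))  # ~0.5 for large num_qubits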
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
lowercase = 8.3_144_598
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
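# Root-mean-square speed of an ideal gas molecule: v_rms = sqrt(3 * R * T / M),
# where R is the universal gas constant, T the temperature in kelvin and M the
# molar mass in kg/mol.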
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowercase = 300
lowercase = 28
lowercase = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowercase = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowercase = '''
Calculates how good predictions are, given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowercase = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowercase = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string")),
"references": datasets.Value("string"),
}) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=[1, 1_0, 1_0_0] , lowerCAmelCase_=4 , lowerCAmelCase_=3.0) -> int:
"""simple docstring"""
if os.getenv("HF_ALLOW_CODE_EVAL" , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows.")
with ThreadPoolExecutor(max_workers=lowerCAmelCase_) as executor:
a_ =[]
a_ =Counter()
a_ =0
a_ =defaultdict(lowerCAmelCase_)
for task_id, (candidates, test_case) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_)):
for candidate in candidates:
a_ =candidate + "\n" + test_case
a_ =(test_program, timeout, task_id, completion_id[task_id])
a_ =executor.submit(lowerCAmelCase_ , *lowerCAmelCase_)
futures.append(lowerCAmelCase_)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCAmelCase_):
a_ =future.result()
results[result["task_id"]].append((result["completion_id"], result))
a_ , a_ =[], []
for result in results.values():
result.sort()
a_ =[r[1]["passed"] for r in result]
total.append(len(lowerCAmelCase_))
correct.append(sum(lowerCAmelCase_))
a_ =np.array(lowerCAmelCase_)
a_ =np.array(lowerCAmelCase_)
a_ =k
a_ ={f"""pass@{k}""": estimate_pass_at_k(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
'''simple docstring'''
def estimator(lowercase__ , lowercase__ , lowercase__ ) -> float:
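# Numerically stable form of the unbiased estimator from the Codex paper:
# pass@k = 1 - C(n - c, k) / C(n, k),
# i.e. one minus the probability that k samples drawn without replacement from
# n candidates (of which c are correct) contain no correct candidate.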
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowercase__ , lowercase__ ):
a_ =itertools.repeat(lowercase__ , len(lowercase__ ) )
else:
assert len(lowercase__ ) == len(lowercase__ )
a_ =iter(lowercase__ )
return np.array([estimator(int(lowercase__ ) , int(lowercase__ ) , lowercase__ ) for n, c in zip(lowercase__ , lowercase__ )] )
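# Hedged worked example (assumes the function above is exposed under the name
# `estimate_pass_at_k`, as at its call site in `compute`): with n = 5 candidates
# of which c = 2 pass, pass@1 = 1 - C(3, 1) / C(5, 1) = 1 - 3/5 = 0.4.
def _pass_at_k_demo() -> None:
    value = estimate_pass_at_k(np.array([5]), np.array([2]), 1)[0]
    assert abs(value - 0.4) < 1e-9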
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ , a_ =0, 1
while True:
a_ , a_ =b, a + b
yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
'''simple docstring'''
a_ =1
a_ =fibonacci_generator()
while len(str(next(lowercase__ ) ) ) < n:
answer += 1
return answer + 1
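# Analytic cross-check (illustrative only): by Binet's formula F(i) ~ phi**i / sqrt(5),
# the index of the first Fibonacci number with n digits is approximately
# ceil((n - 1 + log10(5) / 2) / log10(phi)); for n = 1000 this gives 4782.
def _analytic_index(n: int) -> int:
    from math import ceil, log10, sqrt
    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))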
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 41
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowercase = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "switch_transformers"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , lowerCAmelCase_=3_2_1_2_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=False , lowerCAmelCase_=0.0_1 , lowerCAmelCase_="float32" , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1e-6 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=1.0 , lowerCAmelCase_="relu" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =d_kv
a_ =d_ff
a_ =num_sparse_encoder_layers
a_ =num_layers
a_ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ =num_sparse_decoder_layers
# Determines every how many encoder layers a sparse layer is placed.
if self.num_sparse_encoder_layers > 0:
a_ =self.num_layers // self.num_sparse_encoder_layers
else:
a_ =self.num_layers # HACK: this will create 0 sparse layers
# Determines every how many decoder layers a sparse layer is placed.
if self.num_sparse_decoder_layers > 0:
a_ =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ =self.num_decoder_layers # HACK: this will create 0 sparse layers
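# Example: with num_layers = 12 and num_sparse_encoder_layers = 3, a sparse MoE
# layer is placed every 12 // 3 = 4 encoder layers.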
a_ =num_heads
a_ =num_experts
a_ =expert_capacity
a_ =router_bias
a_ =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
a_ =router_dtype
a_ =router_ignore_padding_tokens
a_ =relative_attention_num_buckets
a_ =relative_attention_max_distance
a_ =dropout_rate
a_ =layer_norm_epsilon
a_ =initializer_factor
a_ =feed_forward_proj
a_ =use_cache
a_ =add_router_probs
a_ =router_z_loss_coef
a_ =router_aux_loss_coef
a_ =self.feed_forward_proj.split("-")
a_ =act_info[-1]
a_ =act_info[0] == "gated"
if len(lowerCAmelCase_) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ ="gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 41
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__magic_name__ : bool = field(default=__a , metadata={"help": "Whether to freeze the encoder."})
__magic_name__ : bool = field(default=__a , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__magic_name__ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__ : Optional[int] = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__magic_name__ : Optional[str] = field(default=__a , metadata={"help": "Source language id for translation."})
__magic_name__ : Optional[str] = field(default=__a , metadata={"help": "Target language id for translation."})
__magic_name__ : Optional[int] = field(default=__a , metadata={"help": "# num_beams to use for evaluation."})
__magic_name__ : bool = field(
default=__a , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase__ , os.path.join(lowercase__ , F"""{split}_results.json""" ) )
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ =parser.parse_args_into_dataclasses()
check_output_dir(lowercase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(lowercase__ , lowercase__ , lowercase__ ):
assert hasattr(lowercase__ , lowercase__ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase__ , lowercase__ , getattr(lowercase__ , lowercase__ ) )
a_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=lowercase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a_ =model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase__ , lowercase__ ):
a_ =tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a_ =tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a_ =SeqaSeqDataset
# Get datasets
a_ =(
dataset_class(
lowercase__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
a_ =(
dataset_class(
lowercase__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a_ =(
dataset_class(
lowercase__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a_ =(
build_compute_metrics_fn(data_args.task , lowercase__ ) if training_args.predict_with_generate else None
)
a_ =SeqaSeqTrainer(
model=lowercase__ , args=lowercase__ , data_args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , data_collator=SeqaSeqDataCollator(
lowercase__ , lowercase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase__ , tokenizer=lowercase__ , )
a_ ={}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
a_ =trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a_ =train_result.metrics
a_ =data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a_ =trainer.evaluate(metric_key_prefix="val" )
a_ =data_args.n_val
a_ =round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
a_ =trainer.predict(test_dataset=lowercase__ , metric_key_prefix="test" )
a_ =test_output.metrics
a_ =data_args.n_test
if trainer.is_world_process_zero():
a_ =round(metrics["test_loss"] , 4 )
handle_metrics("test" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.predict_with_generate:
a_ =tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
a_ =lmap(str.strip , lowercase__ )
write_txt_file(lowercase__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(lowercase__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={}
a_ =os.path.join(lowercase__ , "all_results.json" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , "r" ) as f:
a_ =json.load(lowercase__ )
else:
raise ValueError(F"""can't find {path}""" )
return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
a_ =self.get_auto_remove_tmp_dir()
a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase = logging.getLogger(__name__)
lowercase = '''Hello world! cécé herlolip'''
lowercase = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =BertAbsConfig(
temp_dir="." , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
a_ =torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage )
a_ =AbsSummarizer(lowercase__ , torch.device("cpu" ) , lowercase__ )
original.eval()
a_ =BertAbsSummarizer(lowercase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
a_ =BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
a_ =tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowercase__ )) )
a_ =torch.tensor(lowercase__ ).unsqueeze(0 )
a_ =tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowercase__ )) )
a_ =torch.tensor(lowercase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
a_ =encoder_input_ids
a_ =decoder_input_ids
a_ =a_ =None
a_ =None
a_ =a_ =None
a_ =a_ =None
a_ =None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical.
a_ =original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
a_ =original.generator(lowercase__ )
a_ =new_model(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
a_ =new_model.generator(lowercase__ )
a_ =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowercase__ ) )
a_ =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowercase__ ) )
a_ =torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowercase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class UpperCAmelCase ( __a):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
a_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 41
| 0
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase = logging.get_logger(__name__)
lowercase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Dict = "detr"
__magic_name__ : Dict = ["past_key_values"]
__magic_name__ : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=1_0_0 , lowerCAmelCase_=6 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_5_6 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_="resnet50" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
a_ =CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =backbone_config.get("model_type")
a_ =CONFIG_MAPPING[backbone_model_type]
a_ =config_class.from_dict(lowerCAmelCase_)
# set timm attributes to None
a_ , a_ , a_ =None, None, None
a_ =use_timm_backbone
a_ =backbone_config
a_ =num_channels
a_ =num_queries
a_ =d_model
a_ =encoder_ffn_dim
a_ =encoder_layers
a_ =encoder_attention_heads
a_ =decoder_ffn_dim
a_ =decoder_layers
a_ =decoder_attention_heads
a_ =dropout
a_ =attention_dropout
a_ =activation_dropout
a_ =activation_function
a_ =init_std
a_ =init_xavier_std
a_ =encoder_layerdrop
a_ =decoder_layerdrop
a_ =encoder_layers
a_ =auxiliary_loss
a_ =position_embedding_type
a_ =backbone
a_ =use_pretrained_backbone
a_ =dilation
# Hungarian matcher
a_ =class_cost
a_ =bbox_cost
a_ =giou_cost
# Loss coefficients
a_ =mask_loss_coefficient
a_ =dice_loss_coefficient
a_ =bbox_loss_coefficient
a_ =giou_loss_coefficient
a_ =eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return self.d_model
@classmethod
def lowercase_ ( cls , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_)
def lowercase_ ( self) -> Dict[str, any]:
"""simple docstring"""
a_ =copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
a_ =self.backbone_config.to_dict()
a_ =self.__class__.model_type
return output
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = version.parse("1.11")
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
])
@property
def lowercase_ ( self) -> float:
"""simple docstring"""
return 1e-5
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return 1_2
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(lowercase__ , ans + num , lowercase__ )
return ans
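# Worked example (using the name from the __main__ block below): a subsequence
# need not be contiguous, so every positive element improves the sum and the
# maximum is the sum of all positive entries (or the largest single element if
# all entries are negative), e.g.
# max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12  # 1 + 4 + 2 + 1 + 4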
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it is important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (even though we don't have a training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (even though it is not used in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
a_ =True
# Deal with multi-line cases
elif (
re.search(
rF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , lowercase__ , )
is not None
):
a_ =True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a_ =True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a_ =[
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
a_ =["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
a_ =True
if not attribute_used:
a_ =False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a_ =True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a_ =True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a_ =True
elif attribute.endswith("_token_id" ):
a_ =True
# configuration class specific cases
if not case_allowed:
a_ =SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a_ =allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =dict(inspect.signature(config_class.__init__ ).parameters )
a_ =[x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
a_ =[signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a_ ={}
if len(config_class.attribute_map ) > 0:
a_ ={v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a_ =inspect.getsourcefile(lowercase__ )
a_ =os.path.dirname(lowercase__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a_ =[os.path.join(lowercase__ , lowercase__ ) for fn in os.listdir(lowercase__ ) if fn.startswith("modeling_" )]
# Get the source code strings
a_ =[]
for path in modeling_paths:
if os.path.isfile(lowercase__ ):
with open(lowercase__ ) as fp:
modeling_sources.append(fp.read() )
a_ =[]
for config_param, default_value in zip(lowercase__ , lowercase__ ):
# `attributes` here is all the variant names for `config_param`
a_ =[config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
unused_attributes.append(attributes[0] )
return sorted(lowercase__ )
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ={}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a_ =[
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowercase__ : inspect.isclass(lowercase__ )
and issubclass(lowercase__ , lowercase__ )
and inspect.getmodule(lowercase__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a_ =check_config_attributes_being_used(lowercase__ )
if len(lowercase__ ) > 0:
a_ =unused_attributes
if len(lowercase__ ) > 0:
a_ ="The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(lowercase__ )
if __name__ == "__main__":
check_config_attributes()
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
'''simple docstring'''
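# Comparing a ** b directly would require huge integers; since log10 is strictly
# increasing, a1 ** b1 > a2 ** b2 exactly when b1 * log10(a1) > b2 * log10(a2),
# so each line's exponent is compared via b * log10(a) instead.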
a_ =0
a_ =0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
a_ , a_ =list(map(lowercase__ , line.split("," ) ) )
if x * logaa(lowercase__ ) > largest:
a_ =x * logaa(lowercase__ )
a_ =i + 1
return result
if __name__ == "__main__":
print(solution())
| 41
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase = logging.get_logger(__name__)
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
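# Pairwise squared distances via the expansion
# ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 * (a_i . b_j),
# so all pairs are computed at once with a single matrix multiplication.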
a_ =b.T
a_ =np.sum(np.square(lowercase__ ) , axis=1 )
a_ =np.sum(np.square(lowercase__ ) , axis=0 )
a_ =np.matmul(lowercase__ , lowercase__ )
a_ =aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
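# Assigns every pixel (an RGB triple) to its nearest cluster centre: a single
# nearest-neighbour step against the fixed colour palette.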
a_ =x.reshape(-1 , 3 )
a_ =squared_euclidean_distance(lowercase__ , lowercase__ )
return np.argmin(lowercase__ , axis=1 )
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : List[str] = ["pixel_values"]
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =size if size is not None else {"height": 2_5_6, "width": 2_5_6}
a_ =get_size_dict(lowerCAmelCase_)
a_ =np.array(lowerCAmelCase_) if clusters is not None else None
a_ =do_resize
a_ =size
a_ =resample
a_ =do_normalize
a_ =do_color_quantize
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
"""simple docstring"""
a_ =get_size_dict(lowerCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""")
return resize(
lowerCAmelCase_ , size=(size["height"], size["width"]) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , ) -> np.ndarray:
"""simple docstring"""
a_ =rescale(image=lowerCAmelCase_ , scale=1 / 1_2_7.5 , data_format=lowerCAmelCase_)
a_ =image - 1
return image
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> PIL.Image.Image:
"""simple docstring"""
a_ =do_resize if do_resize is not None else self.do_resize
a_ =size if size is not None else self.size
a_ =get_size_dict(lowerCAmelCase_)
a_ =resample if resample is not None else self.resample
a_ =do_normalize if do_normalize is not None else self.do_normalize
a_ =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a_ =clusters if clusters is not None else self.clusters
a_ =np.array(lowerCAmelCase_)
a_ =make_list_of_images(lowerCAmelCase_)
if not valid_images(lowerCAmelCase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
a_ =[to_numpy_array(lowerCAmelCase_) for image in images]
if do_resize:
a_ =[self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_) for image in images]
if do_normalize:
a_ =[self.normalize(image=lowerCAmelCase_) for image in images]
if do_color_quantize:
a_ =[to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a_ =np.array(lowerCAmelCase_)
a_ =color_quantize(lowerCAmelCase_ , lowerCAmelCase_).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
a_ =images.shape[0]
a_ =images.reshape(lowerCAmelCase_ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
a_ =list(lowerCAmelCase_)
else:
a_ =[to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_) for image in images]
a_ ={"input_ids": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_)
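# Hedged usage sketch for the quantization path above (this mirrors
# ImageGPT-style color quantization; the random 16-color palette is a made-up
# placeholder, real checkpoints ship trained cluster centers):
def _color_quantize_sketch():
    clusters = np.random.RandomState(0).uniform(-1, 1, size=(16, 3))
    image = np.random.RandomState(1).randint(0, 256, size=(32, 32, 3))
    pixels = (image / 127.5 - 1).reshape(-1, 3)  # same scaling as the normalize step
    token_ids = color_quantize(pixels, clusters)  # one palette index per pixel
    assert token_ids.shape == (32 * 32,)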
| 714
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a, b):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1, r1, n2, r2):
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a, n):
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1, r1, n2, r2):
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
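# Worked example: n ≡ 1 (mod 5) and n ≡ 3 (mod 7) has the unique solution 31
# in [0, 35), and both constructions above must agree on it.
def _crt_demo():
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31
    assert invert_modulo(3, 7) == 5  # 3 * 5 = 15 ≡ 1 (mod 7)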
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : List[Any] = (PNDMScheduler,)
__magic_name__ : Any = (("num_inference_steps", 50),)
def lowercase_ ( self , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ ={
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase_)
return config
def lowercase_ ( self , lowerCAmelCase_=0 , **lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config(**lowerCAmelCase_)
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals
a_ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase_)
a_ =scheduler_class.from_pretrained(lowerCAmelCase_)
new_scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self) -> Dict:
"""simple docstring"""
pass
def lowercase_ ( self , lowerCAmelCase_=0 , **lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals (must be after setting timesteps)
a_ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase_)
a_ =scheduler_class.from_pretrained(lowerCAmelCase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residual (must be after setting timesteps)
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config(**lowerCAmelCase_)
a_ =scheduler_class(**lowerCAmelCase_)
a_ =1_0
a_ =self.dummy_model()
a_ =self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase_)
for i, t in enumerate(scheduler.prk_timesteps):
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
return sample
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase_ , "set_timesteps"):
scheduler.set_timesteps(lowerCAmelCase_)
elif num_inference_steps is not None and not hasattr(lowerCAmelCase_ , "set_timesteps"):
a_ =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , 0 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =scheduler.step_prk(lowerCAmelCase_ , 1 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ =scheduler.step_plms(lowerCAmelCase_ , 0 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =scheduler.step_plms(lowerCAmelCase_ , 1 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase_)
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config(steps_offset=1)
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(1_0)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1]) , )
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
self.check_over_forward(num_inference_steps=lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =2_7
for scheduler_class in self.scheduler_classes:
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
def lowercase_ ( self) -> Any:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_):
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.full_loop()
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 1_9_8.1_3_1_8) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(prediction_type="v_prediction")
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 6_7.3_9_8_6) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.0_1)
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 2_3_0.0_3_9_9) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.0_1)
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 1_8_6.9_4_8_2) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4) < 1e-3
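# Minimal end-to-end sketch of PNDMScheduler outside the test harness (the
# 4x8x8 latent and the scaled-identity "model" are stand-ins for a real UNet):
def _pndm_usage_sketch():
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 4, 8, 8)
    for t in scheduler.timesteps:
        model_output = 0.1 * sample  # a real pipeline would call the UNet here
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample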
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix):
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a, v):
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests():
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
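# Property check (min-max theorem): for a Hermitian h, the Rayleigh quotient of
# any nonzero v lies between the smallest and largest eigenvalues of h. The
# random matrix below is symmetrized to make it Hermitian:
def _rayleigh_bounds_check():
    rng = np.random.default_rng(0)
    m = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
    h = m + m.conjugate().T
    v = rng.normal(size=(4, 1))
    q = rayleigh_quotient(h, v).real.item()
    eigs = np.linalg.eigvalsh(h)  # ascending order
    assert eigs[0] - 1e-9 <= q <= eigs[-1] + 1e-9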
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 41
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=3_0 , lowerCAmelCase_=4_0_0 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=0.9 , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
"""simple docstring"""
a_ =size if size is not None else {"shortest_edge": 3_0}
a_ =crop_size if crop_size is not None else {"height": 3_0, "width": 3_0}
a_ =parent
a_ =batch_size
a_ =num_channels
a_ =min_resolution
a_ =max_resolution
a_ =do_resize_and_center_crop
a_ =size
a_ =crop_pct
a_ =crop_size
a_ =do_normalize
a_ =image_mean
a_ =image_std
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = PoolFormerImageProcessor if is_vision_available() else None
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =PoolFormerImageProcessingTester(self)
@property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(lowerCAmelCase_ , "size"))
self.assertTrue(hasattr(lowerCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std"))
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 3_0})
self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0})
a_ =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {"shortest_edge": 4_2})
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4})
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
pass
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 716
|
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board, row):
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board):
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
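# Cheap regression check against the known N-queens counts (OEIS A000170):
# N = 4 has 2 solutions and N = 5 has 10. `solve` prints each board as a side
# effect, so this is noisy, but the counts verify the backtracking logic.
def _check_known_counts(queens, expected):
    solution.clear()
    solve([[0] * queens for _ in range(queens)], 0)
    assert len(solution) == expected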
| 41
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
lowercase = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =list(state_dict.keys() )
for name in state_dict_keys:
a_ =state_dict.pop(lowercase__ )
# emb -> embedding
if name.startswith("emb." ):
a_ =name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
a_ =name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
a_ =re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , lowercase__ )
# ffn -> feed_forward
a_ =re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , lowercase__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
a_ =name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
a_ =name.replace(".time_mix_v" , ".time_mix_value" )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
a_ =name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
a_ ="rwkv." + name
a_ =weight
return state_dict
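# Toy illustration of the key-renaming rules above (calling the helper by its
# original name, convert_state_dict, which is an assumption here; the zero
# tensors are placeholders, real checkpoints carry trained weights):
def _demo_rename():
    toy = {"blocks.3.att.time_mix_k": torch.zeros(1), "emb.weight": torch.zeros(1)}
    renamed = convert_state_dict(toy)
    assert "rwkv.blocks.3.attention.time_mix_key" in renamed
    assert "rwkv.embeddings.weight" in renamed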
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ):
'''simple docstring'''
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
a_ =5_0_2_7_7
a_ =AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
a_ =PreTrainedTokenizerFast(tokenizer_file=lowercase__ )
a_ =len(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
# 2. Build the config
a_ =list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
a_ =candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
a_ =RwkvConfig(
vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowercase__ )
# 3. Download model file then convert state_dict
a_ =hf_hub_download(lowercase__ , lowercase__ )
a_ =torch.load(lowercase__ , map_location="cpu" )
a_ =convert_state_dict(lowercase__ )
# 4. Split in shards and save
a_ , a_ =shard_checkpoint(lowercase__ )
for shard_file, shard in shards.items():
torch.save(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
if index is not None:
a_ =os.path.join(lowercase__ , lowercase__ )
# Save the index as well
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
a_ =json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + "\n"
f.write(lowercase__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model." )
a_ =list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
a_ =torch.load(os.path.join(lowercase__ , lowercase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , lowercase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
a_ =AutoModelForCausalLM.from_pretrained(lowercase__ )
model.push_to_hub(lowercase__ , max_shard_size="2GB" )
tokenizer.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowercase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''simple docstring'''
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
from math import factorial, radians
def maclaurin_sin(angle_in_degrees, accuracy=18, rounded_values_count=10):
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
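# Spot check against the standard library: with the default 18 series terms the
# truncation error is far below the 10-decimal rounding, so the two must agree
# on these sample angles.
def _maclaurin_sin_check():
    from math import sin as math_sin
    for degrees in (0, 30, 90, 180, 270, 361):
        assert maclaurin_sin(degrees) == round(math_sin(radians(degrees % 360)), 10)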
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
def or_gate(input_1, input_2):
    '''simple docstring'''
    return int((input_1, input_2).count(1) != 0)
def test_or_gate():
    '''simple docstring'''
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
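# Worked example from the problem statement: "SKY" has word value
# 19 + 11 + 25 = 55 = t(10), so it counts as a triangular word.
def _sky_demo():
    value = sum(ord(letter) - 64 for letter in "SKY")
    assert value == 55 and value in TRIANGULAR_NUMBERS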
if __name__ == "__main__":
print(solution())
| 41
| 0
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =Sql(
cache_dir=lowerCAmelCase_ , features=lowerCAmelCase_ , sql=lowerCAmelCase_ , con=lowerCAmelCase_ , **lowerCAmelCase_ , )
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =None
a_ =None
a_ =None
a_ =None
self.builder.download_and_prepare(
download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , )
# Build dataset for splits
a_ =self.builder.as_dataset(
split="train" , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory)
return dataset
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
a_ =dataset
a_ =name
a_ =con
a_ =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ =num_proc
a_ =to_sql_kwargs
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.to_sql_kwargs.pop("sql" , lowerCAmelCase_)
a_ =self.to_sql_kwargs.pop("con" , lowerCAmelCase_)
a_ =self.to_sql_kwargs.pop("index" , lowerCAmelCase_)
a_ =self._write(index=lowerCAmelCase_ , **self.to_sql_kwargs)
return written
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ , a_ , a_ =args
a_ ={**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
a_ =query_table(
table=self.dataset.data , key=slice(lowerCAmelCase_ , offset + self.batch_size) , indices=self.dataset._indices , )
a_ =batch.to_pandas()
a_ =df.to_sql(self.name , self.con , index=lowerCAmelCase_ , **lowerCAmelCase_)
return num_rows or len(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
            num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCAmelCase_ , lowerCAmelCase_)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
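# Hedged usage sketch (the table name and toy rows are made up): in the public
# API the classes above are reached via `Dataset.to_sql`, shown here against an
# in-memory SQLite connection.
def _sqlite_roundtrip_sketch():
    import sqlite3
    con = sqlite3.connect(":memory:")
    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_sql("toy_table", con)  # delegates to the writer class above
    assert con.execute("SELECT COUNT(*) FROM toy_table").fetchone()[0] == 2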
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph):
    '''simple docstring'''
    queue = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build a max one
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
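# Standalone illustration of the negated-priority trick used above: heapq is a
# min-heap, so storing -x makes the largest original value pop first.
def _max_heap_demo():
    heap = [-x for x in [3, 1, 4, 1, 5]]
    heapq.heapify(heap)
    assert -heapq.heappop(heap) == 5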
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 721
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n):
    '''simple docstring'''
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")
def solution():
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
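# Worked check of the known answer shape: 9327 concatenated with 2 * 9327 is
# 932718654, which uses each digit 1-9 exactly once.
def _pandigital_demo():
    assert str(100002 * 9327) == str(9327) + str(2 * 9327)
    assert is_9_pandigital(100002 * 9327)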
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 0
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def UpperCAmelCase_ ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
a_ ="__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowercase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def UpperCAmelCase_ ( ):
assert _test_patching.open is open
a_ ="__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , lowercase__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def UpperCAmelCase_ ( ):
a_ ="__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , lowercase__ ):
pass
def UpperCAmelCase_ ( ):
a_ ="__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , lowercase__ ) is None
with patch_submodule(_test_patching , "len" , lowercase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def UpperCAmelCase_ ( ):
a_ ="__test_patch_submodule_start_and_stop_mock__"
a_ =patch_submodule(_test_patching , "open" , lowercase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def UpperCAmelCase_ ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
a_ ="__test_patch_submodule_successive_join__"
a_ ="__test_patch_submodule_successive_dirname__"
a_ ="__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowercase__ ):
with patch_submodule(_test_patching , "os.rename" , lowercase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowercase__ ):
with patch_submodule(_test_patching , "os.path.join" , lowercase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def UpperCAmelCase_ ( ):
a_ ="__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , lowercase__ ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , lowercase__ ):
pass
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
"""simple docstring"""
a_ =4
a_ =3_2
a_ =(3_2, 3_2)
a_ =torch.manual_seed(0)
a_ =torch.device(lowerCAmelCase_)
a_ =(batch_size, num_channels) + sizes
a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
a_ ={"hidden_states": hidden_states}
if include_temb:
a_ =1_2_8
a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
a_ =torch.manual_seed(1)
a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a_ =3_2
if self.block_type == "mid":
init_dict.pop("out_channels")
a_ =self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
a_ =unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
self.assertEqual(output.shape , self.output_shape)
a_ =output[0, -1, -3:, -3:]
a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
| 41
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =os.path.abspath(lowercase__ )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
a_ =tf.train.list_variables(lowercase__ )
a_ =[]
a_ =[]
a_ =[]
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
a_ =full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
a_ =name[1:]
# figure out how many levels deep the name is
a_ =0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(lowercase__ )
# read data
a_ =tf.train.load_variable(lowercase__ , lowercase__ )
names.append("/".join(lowercase__ ) )
arrays.append(lowercase__ )
logger.info(F"""Read a total of {len(lowercase__ ):,} layers""" )
# Sanity check
if len(set(lowercase__ ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(lowercase__ ) )})""" )
a_ =list(set(lowercase__ ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(F"""Unknown embedding layer with name {full_name}""")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm (unreachable duplicate of the branch above, kept as in the original)
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(F"""Ignored {m_name}""")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                F""" {array.shape}""")
        logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""")
    return model
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    '''simple docstring'''
    logger.info(F"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from the TF 2.x checkpoint
    logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config_path)

    # Save the PyTorch model
    logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
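# A minimal invocation sketch for this conversion script (the script file name
# and the paths below are illustrative assumptions, not taken from this source):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin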
| 701
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    '''simple docstring'''
    print(F"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(F"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}

    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
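    # A minimal non-interactive sketch of the same API (the edge list below is
    # illustrative; bellman_ford expects one {"src", "dst", "weight"} dict per edge):
    #
    #   example_graph = [
    #       {"src": 0, "dst": 1, "weight": 4},
    #       {"src": 0, "dst": 2, "weight": 5},
    #       {"src": 1, "dst": 2, "weight": -3},
    #   ]
    #   bellman_ford(example_graph, 3, 3, 0)  # -> [0.0, 4.0, 1.0]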
| 41
| 0
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class TaTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=1_0_0,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        # Add extra_ids to the list of additional special tokens
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length) -> int:
        """simple docstring"""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """simple docstring"""
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        """simple docstring"""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """simple docstring"""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """simple docstring"""
        # Replace SPIECE_UNDERLINE with a space so that it is only used at the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """simple docstring"""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
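# Usage sketch (illustrative; `TaTokenizer` corresponds to `T5Tokenizer` in the
# transformers library, and downloading "t5-small" is assumed to work):
#
#   tokenizer = TaTokenizer.from_pretrained("t5-small")
#   # The extra ids double as sentinel tokens for span corruption:
#   tokenizer.get_sentinel_tokens()[:2]      # e.g. ['<extra_id_0>', '<extra_id_1>'] (order not guaranteed)
#   tokenizer.get_sentinel_token_ids()[:2]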
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')

prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_0_0_8)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_8)

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        """simple docstring"""
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
        original_tokenizer_encodings = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
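# Usage sketch for the module exports above (illustrative, not part of the
# original file; assumes PyTorch is installed so the torch branch is taken):
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )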
| 41
| 0
|
'''simple docstring'''
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # Reduce TensorFlow log verbosity
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.raw_vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    '''simple docstring'''

    def __init__(self, vocab, ids_to_tokens, emoji):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        """simple docstring"""
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        """simple docstring"""
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
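# Usage sketch (illustrative, not part of the original file; assumes network
# access to the checkpoint): out-of-vocabulary characters fall back to the
# "<|byteN|>" tokens handled in convert_id_to_token above.
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは")["input_ids"]
#   tokenizer.decode(ids)  # expected to round-trip back to "こんにちは"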
| 41
| 0
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    '''simple docstring'''
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
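# For n = 4 the search finds exactly two solutions, [1, 3, 0, 2] and
# [2, 0, 3, 1], which print as:
#
#   . Q . .        . . Q .
#   . . . Q        Q . . .
#   Q . . .        . . . Q
#   . . Q .        . Q . .
#
# (shown side by side here for brevity; the script prints them one after the other).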
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    '''simple docstring'''
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_0_0_0,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""")
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
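# A minimal invocation sketch (the script file name below is an assumption,
# not taken from this source):
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 --pytorch_dump_folder_path hf_model --save_model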
| 41
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
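# Note on `make_linear_from_emb` above: fairseq checkpoints tie the LM head to
# the shared embedding matrix, so for fine-tuned checkpoints the head is rebuilt
# as a bias-free Linear that shares the embedding weights. A minimal sketch:
#
#   emb = nn.Embedding(10, 4)
#   head = make_linear_from_emb(emb)
#   assert head.weight.data_ptr() == emb.weight.data_ptr()  # same storage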
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    '''simple docstring'''
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # Reduce TensorFlow log verbosity
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
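# Worked check: in this 1-indexed convention the 12th term, 144, is the first
# Fibonacci number with three digits, so solution(3) == 12 (the generator
# yields ten terms below 100, the counter starts at 1, and the trailing "+ 1"
# accounts for the term that ended the loop).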
| 41
| 0
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 1_0) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
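# Worked check (Project Euler 43): 1406357289 is one qualifying 0-9 pandigital
# number; e.g. its substring d5d6d7 = 357 is divisible by 7 and d6d7d8 = 572
# by 11. A quick spot check of the predicate above:
#
#   is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))  # -> True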
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=3_2_1_2_8,
        d_model=7_6_8,
        d_kv=6_4,
        d_ff=2_0_4_8,
        expert_capacity=6_4,
        num_layers=1_2,
        num_sparse_encoder_layers=3,
        num_decoder_layers=1_2,
        num_sparse_decoder_layers=3,
        num_heads=1_2,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.0_1,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.0_0_1,
        router_aux_loss_coef=0.0_0_1,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        use_cache=True,
        add_router_probs=False,
        is_encoder_decoder=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how often (every how many encoder layers) a sparse layer is placed.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how often (every how many decoder layers) a sparse layer is placed.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
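# Usage sketch (illustrative, not part of the original module): with the
# defaults above, num_layers=12 and num_sparse_encoder_layers=3 give
# encoder_sparse_step = 12 // 3 = 4, i.e. every fourth encoder layer is a
# sparse (mixture-of-experts) layer.
#
#   config = SwitchTransformersConfig()
#   config.encoder_sparse_step  # -> 4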
| 41
| 0
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [3_2, 6_4],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={}
a_ =os.path.join(lowercase__ , "all_results.json" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , "r" ) as f:
a_ =json.load(lowercase__ )
else:
raise ValueError(F"""can't find {path}""" )
return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
a_ =self.get_auto_remove_tmp_dir()
a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowercase = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
a_ =key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
a_ =key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
a_ =key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
a_ =key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ ={}
import re
a_ =re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowercase__ ):
a_ =re_encoder_block_conv_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] )
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
a_ =re_encoder_block_conv_in.sub(lowercase__ , lowercase__ )
elif re_encoder_block_resnet.fullmatch(lowercase__ ):
a_ =re_encoder_block_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] )
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_encoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_encoder_block_proj_out.fullmatch(lowercase__ ):
a_ =re_encoder_block_proj_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
a_ =re_encoder_block_proj_out.sub(lowercase__ , lowercase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowercase__ ):
a_ =re_decoder_block_conv_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
a_ =re_decoder_block_conv_out.sub(lowercase__ , lowercase__ )
elif re_decoder_block_resnet.fullmatch(lowercase__ ):
a_ =re_decoder_block_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_decoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_decoder_block_proj_in.fullmatch(lowercase__ ):
a_ =re_decoder_block_proj_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
a_ =re_decoder_block_proj_in.sub(lowercase__ , lowercase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowercase__ ):
a_ =re_prior_cond_conv_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ =F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
a_ =re_prior_cond_conv_out.sub(lowercase__ , lowercase__ )
elif re_prior_cond_resnet.fullmatch(lowercase__ ):
a_ =re_prior_cond_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_prior_cond_resnet.sub(lowercase__ , lowercase__ )
elif re_prior_cond_proj_in.fullmatch(lowercase__ ):
a_ =re_prior_cond_proj_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
a_ =re_prior_cond_proj_in.sub(lowercase__ , lowercase__ )
# keep original key
else:
a_ =original_key
a_ =replace_key(lowercase__ )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
            a_ =model_state_dict[F"""{key_prefix}.{key}"""]
            print(F"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
a_ =original_key
a_ =original_key
a_ =value
return new_dict
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__=None , lowercase__=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
a_ =requests.get(F"""{PREFIX}{file}""" , allow_redirects=lowercase__ )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=lowercase__ )
open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , "wb" ).write(r.content )
a_ =MODEL_MAPPING[model_name.split("/" )[-1]]
a_ =JukeboxConfig.from_pretrained(lowercase__ )
a_ =JukeboxModel(lowercase__ )
a_ =[]
a_ ={}
for i, dict_name in enumerate(lowercase__ ):
a_ =torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )["model"]
a_ ={}
for k in old_dic.keys():
if k.endswith(".b" ):
a_ =old_dic[k]
elif k.endswith(".w" ):
a_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a_ =old_dic[k]
else:
a_ =old_dic[k]
a_ ="vqvae" if i == 0 else F"""priors.{3 - i}"""
a_ =fix_jukebox_keys(lowercase__ , model.state_dict() , lowercase__ , lowercase__ )
weight_dict.append(lowercase__ )
a_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(lowercase__ )
for i in range(len(lowercase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
json.dump(lowercase__ , lowercase__ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
return weight_dict
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowercase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
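# Illustrative standalone check (added, not in the original script): the encoder
# regexes above flatten "model.<i>.<j>" into a single downsample_block index via
# i * 2 + j, e.g. model.2.1 maps to block index 5.
import re as _re
_m = _re.fullmatch(
    r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)",
    "encoders.0.level_blocks.1.model.2.1.weight",
)
assert int(_m.groups()[2]) * 2 + int(_m.groups()[3]) == 5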
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class UpperCAmelCase ( __a):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
a_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 41
| 0
|
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
        a_ =max(ans , ans + num , num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
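# Worked example (illustrative, added): for [1, -2, 3, 4] the recurrence keeps
# ans=1, then max(1, -1, -2)=1, then max(1, 4, 3)=4, then max(4, 8, 4)=8 --
# the sum of the positive elements, as expected for a non-contiguous subsequence.
assert max_subsequence_sum([1, -2, 3, 4]) == 8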
| 41
| 0
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
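# Illustrative check (added): TF stores conv kernels as (H, W, in, out) while
# PyTorch expects (out, in, H, W), which is why replace_params above permutes
# conv weights with (3, 2, 0, 1).
_tf_kernel = torch.zeros(3, 3, 16, 32)  # (H, W, in, out)
assert tuple(_tf_kernel.permute(3, 2, 0, 1).shape) == (32, 16, 3, 3)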
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
'''simple docstring'''
a_ =0
a_ =0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , lowercase__ ) ) ):
        a_ , a_ =list(map(int , line.split("," ) ) )  # unpacks base, x
        if x * logaa(base ) > largest:
            a_ =x * logaa(base )
            a_ =i + 1
return result
if __name__ == "__main__":
print(solution())
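# Worked example (illustrative, added): comparing exponents via logarithms,
# 7 * log10(3) ~= 3.34 beats 10 * log10(2) ~= 3.01, matching 3**7 = 2187 > 2**10 = 1024.
assert 7 * logaa(3) > 10 * logaa(2)  # logaa stands in for math.log10 here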
| 41
| 0
|
import math
from numpy import inf
from scipy.integrate import quad
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if num <= 0:
raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
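# Illustrative standalone check (added): Gamma(5) == 4! == 24 for this integrand.
_val, _err = quad(lambda x: math.pow(x, 4) * math.exp(-x), 0, inf)
assert abs(_val - 24.0) < 1e-6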
| 714
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((a_) , (a_)) =extended_euclid(lowercase__ , a % b )
a_ =a // b
return (y, x - k * y)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
a_ =(b % n + n) % n
return b
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
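# Worked example (illustrative, added): for n1=5, r1=1 and n2=7, r2=2 the Bezout
# pair from extended_euclid(5, 7) is (3, -2), since 5*3 + 7*(-2) == 1, and the
# CRT combination r2*x*n1 + r1*y*n2 = 2*3*5 + 1*(-2)*7 = 16 satisfies
# 16 % 5 == 1 and 16 % 7 == 2.
assert (2 * 3 * 5 + 1 * (-2) * 7) % 35 == 16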
| 41
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowercase = parser.parse_args()
if args.model_type == "bert":
lowercase = BertForMaskedLM.from_pretrained(args.model_name)
lowercase = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowercase = model.state_dict()
lowercase = {}
for w in ["word_embeddings", "position_embeddings"]:
lowercase = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
lowercase = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
lowercase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
lowercase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
lowercase = state_dict['''cls.predictions.decoder.weight''']
lowercase = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowercase = state_dict[F"""cls.predictions.transform.dense.{w}"""]
lowercase = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =v.conjugate().T
    a_ =v_star.dot(a )
assert isinstance(lowercase__ , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
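# Illustrative check (added): for an eigenvector of a Hermitian matrix the
# Rayleigh quotient is exactly the corresponding eigenvalue.
_A = np.array([[2.0, 0.0], [0.0, 5.0]])
_v = np.array([[0.0], [1.0]])
assert np.isclose((_v.conjugate().T @ _A @ _v) / (_v.conjugate().T @ _v), 5.0)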
| 41
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = UnCLIPImageVariationPipeline
__magic_name__ : Union[str, Any] = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
__magic_name__ : Dict = IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ : Optional[int] = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
__magic_name__ : Union[str, Any] = False
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return 1_0_0
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowerCAmelCase_)
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowerCAmelCase_)
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
a_ =UnCLIPTextProjModel(**lowerCAmelCase_)
return model
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"sample_size": 3_2,
# RGB in channels
"in_channels": 3,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
a_ =UNetaDConditionModel(**lowerCAmelCase_)
return model
@property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
torch.manual_seed(1)
a_ =UNetaDModel(**self.dummy_super_res_kwargs)
return model
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.dummy_decoder
a_ =self.dummy_text_proj
a_ =self.dummy_text_encoder
a_ =self.dummy_tokenizer
a_ =self.dummy_super_res_first
a_ =self.dummy_super_res_last
a_ =UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
a_ =UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
a_ =CLIPImageProcessor(crop_size=3_2 , size=3_2)
a_ =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0 , lowerCAmelCase_=True) -> List[Any]:
"""simple docstring"""
a_ =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
if pil_image:
a_ =input_image * 0.5 + 0.5
a_ =input_image.clamp(0 , 1)
a_ =input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ =DiffusionPipeline.numpy_to_pil(lowerCAmelCase_)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ =np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ =np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =[
pipeline_inputs["image"],
pipeline_inputs["image"],
]
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =[
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
a_ =np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =torch.device("cpu")
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : Optional[int] = 1
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(0)
a_ =pipe.decoder.dtype
a_ =1
a_ =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
a_ =pipe.prepare_latents(
lowerCAmelCase_ , dtype=lowerCAmelCase_ , device=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , scheduler=DummyScheduler())
a_ =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
a_ =pipe.prepare_latents(
lowerCAmelCase_ , dtype=lowerCAmelCase_ , device=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , scheduler=DummyScheduler())
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , decoder_latents=lowerCAmelCase_ , super_res_latents=lowerCAmelCase_).images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
# Don't pass image, instead pass embedding
a_ =pipeline_inputs.pop("image")
a_ =pipe.image_encoder(lowerCAmelCase_).image_embeds
a_ =pipe(
**lowerCAmelCase_ , decoder_latents=lowerCAmelCase_ , super_res_latents=lowerCAmelCase_ , image_embeddings=lowerCAmelCase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1e-4
@skip_mps
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =torch_device == "cpu"
        # Check is relaxed because there is no torch 2.0 sliced attention added_kv processor
a_ =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase_ , expected_max_diff=lowerCAmelCase_)
@skip_mps
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =torch_device == "cpu"
a_ =True
a_ =[
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=lowerCAmelCase_ , relax_max_difference=lowerCAmelCase_ , additional_params_copy_to_batched_inputs=lowerCAmelCase_ , )
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =[
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
a_ =[2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowerCAmelCase_ , additional_params_copy_to_batched_inputs=lowerCAmelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowerCAmelCase_)
@skip_mps
def lowercase_ ( self) -> str:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase_ ( self) -> Dict:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowercase_ ( self) -> str:
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
a_ =UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa)
a_ =pipeline.to(lowerCAmelCase_)
pipeline.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ =pipeline(
lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="np" , )
a_ =output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ , 1_5)
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
return True
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in range(len(lowercase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
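# Sanity note (added): for n = 8 this backtracking search prints the classic
# 92 distinct eight-queens placements, so len(solution) should equal 92 here.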
| 41
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : List[str] = CpmAntTokenizer
__magic_name__ : Dict = False
def lowercase_ ( self) -> Dict:
"""simple docstring"""
super().setUp()
a_ =[
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
a_ ="今天天气真好!"
a_ =["今天", "天气", "真", "好", "!"]
a_ =tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ ="今天天气真好!"
a_ =[tokenizer.bos_token] + tokens
a_ =[6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
a_ =tokenizer.decode(lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
a_ =sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowercase__ ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
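# Worked example (illustrative, added): 100 received one year out at a 10%
# discount rate has a present value of 100 / 1.1 ~= 90.91.
assert round(sum(cf / (1 + 0.10) ** i for i, cf in enumerate([0, 100])), 2) == 90.91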
| 719
|
'''simple docstring'''
import os
# Precompute a list of the first 100 triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
    a_ =os.path.dirname(os.path.realpath(__file__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
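# Worked example (illustrative, added): "SKY" has word value 19 + 11 + 25 = 55,
# the 10th triangular number, so it counts as a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS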
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
    # hack: older checkpoints store a single "vocab_size"; it is mirrored into input/output vocab sizes below
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
    # fix up checkpoint keys so they match the HF module naming
a_ ="_orig_mod."
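    # keys saved from a torch.compile()-wrapped model are prefixed with "_orig_mod."; strip that prefix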
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
            # replace part of the key with the corresponding layer name in the HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
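    # ".attn.bias" keys are causal-mask buffers rather than learned weights, so mismatches there are expected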
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
    # sanity check: the converted model should produce the same output as the original bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
    # any residual output difference should stem from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
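# Example invocation (the script name and output path below are illustrative):
#   python convert_suno_to_hf.py text ./bark-text-hf --is_small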
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =FileLock(str(tmpdir / "foo.lock" ) )
a_ =FileLock(str(tmpdir / "foo.lock" ) )
a_ =0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
a_ =time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ="a" * 1_0_0_0 + ".lock"
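    # most filesystems cap file names at 255 bytes, so FileLock is expected to shorten the lock file name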
a_ =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
a_ =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
    '''Return True if `n` contains each of the digits 1-9 exactly once.'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
    '''Return the largest 1-9 pandigital number expressible as the concatenated product of an integer with (1, 2, ..., n).'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
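        # concatenating a 4-digit n with 2 * n (five digits) gives n * 10**5 + 2 * n = 100002 * n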
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
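        # concatenating a 3-digit n with 2 * n and 3 * n (three digits each) gives n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n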
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : "DiagonalGaussianDistribution"
class UpperCAmelCase ( __a , __a):
'''simple docstring'''
__magic_name__ : List[str] = True
@register_to_config
def __init__( self , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = ("DownEncoderBlock2D",) , lowerCAmelCase_ = ("UpDecoderBlock2D",) , lowerCAmelCase_ = (6_4,) , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "silu" , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 3_2 , lowerCAmelCase_ = 3_2 , lowerCAmelCase_ = 0.1_8_2_1_5 , ) -> str:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
a_ =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
# pass init params to Decoder
a_ =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , )
a_ =nn.Convad(2 * latent_channels , 2 * latent_channels , 1)
a_ =nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1)
a_ =False
a_ =False
# only relevant if vae tiling is enabled
a_ =self.config.sample_size
a_ =(
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
a_ =int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
a_ =0.2_5
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Tuple:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , (Encoder, Decoder)):
a_ =value
def lowercase_ ( self , lowerCAmelCase_ = True) -> Union[str, Any]:
"""simple docstring"""
a_ =use_tiling
def lowercase_ ( self) -> str:
"""simple docstring"""
self.enable_tiling(lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =True
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
a_ ={}
def fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
if hasattr(lowerCAmelCase_ , "set_processor"):
a_ =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCAmelCase_ , lowerCAmelCase_)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return processors
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =len(self.attn_processors.keys())
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and len(lowerCAmelCase_) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCAmelCase_)} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")
def fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
if hasattr(lowerCAmelCase_ , "set_processor"):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
module.set_processor(lowerCAmelCase_)
else:
module.set_processor(processor.pop(f"""{name}.processor"""))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCAmelCase_ , lowerCAmelCase_)
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCAmelCase_ , return_dict=lowerCAmelCase_)
if self.use_slicing and x.shape[0] > 1:
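            # encode one sample at a time to reduce peak memory usage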
a_ =[self.encoder(lowerCAmelCase_) for x_slice in x.split(1)]
a_ =torch.cat(lowerCAmelCase_)
else:
a_ =self.encoder(lowerCAmelCase_)
a_ =self.quant_conv(lowerCAmelCase_)
a_ =DiagonalGaussianDistribution(lowerCAmelCase_)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCAmelCase_ , return_dict=lowerCAmelCase_)
a_ =self.post_quant_conv(lowerCAmelCase_)
a_ =self.decoder(lowerCAmelCase_)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_)
@apply_forward_hook
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
a_ =[self._decode(lowerCAmelCase_).sample for z_slice in z.split(1)]
a_ =torch.cat(lowerCAmelCase_)
else:
a_ =self._decode(lowerCAmelCase_).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =min(a.shape[2] , b.shape[2] , lowerCAmelCase_)
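        # linear cross-fade: the bottom rows of `a` fade out while the top rows of `b` fade in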
for y in range(lowerCAmelCase_):
a_ =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =min(a.shape[3] , b.shape[3] , lowerCAmelCase_)
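        # linear cross-fade across the vertical seam: the right columns of `a` fade out while the left columns of `b` fade in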
for x in range(lowerCAmelCase_):
a_ =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> AutoencoderKLOutput:
"""simple docstring"""
a_ =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
a_ =int(self.tile_latent_min_size * self.tile_overlap_factor)
a_ =self.tile_latent_min_size - blend_extent
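        # overlap_size is the sampling stride in pixel space; blend_extent and row_limit operate in latent space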
# Split the image into 512x512 tiles and encode them separately.
a_ =[]
for i in range(0 , x.shape[2] , lowerCAmelCase_):
a_ =[]
for j in range(0 , x.shape[3] , lowerCAmelCase_):
a_ =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
a_ =self.encoder(lowerCAmelCase_)
a_ =self.quant_conv(lowerCAmelCase_)
row.append(lowerCAmelCase_)
rows.append(lowerCAmelCase_)
a_ =[]
for i, row in enumerate(lowerCAmelCase_):
a_ =[]
for j, tile in enumerate(lowerCAmelCase_):
                # blend the tile above and the tile to the left into the current tile,
                # then append the cropped current tile to the result row
if i > 0:
a_ =self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_)
if j > 0:
a_ =self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(lowerCAmelCase_ , dim=3))
a_ =torch.cat(lowerCAmelCase_ , dim=2)
a_ =DiagonalGaussianDistribution(lowerCAmelCase_)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
a_ =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
a_ =int(self.tile_sample_min_size * self.tile_overlap_factor)
a_ =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
a_ =[]
for i in range(0 , z.shape[2] , lowerCAmelCase_):
a_ =[]
for j in range(0 , z.shape[3] , lowerCAmelCase_):
a_ =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
a_ =self.post_quant_conv(lowerCAmelCase_)
a_ =self.decoder(lowerCAmelCase_)
row.append(lowerCAmelCase_)
rows.append(lowerCAmelCase_)
a_ =[]
for i, row in enumerate(lowerCAmelCase_):
a_ =[]
for j, tile in enumerate(lowerCAmelCase_):
                # blend the tile above and the tile to the left into the current tile,
                # then append the cropped current tile to the result row
if i > 0:
a_ =self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_)
if j > 0:
a_ =self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(lowerCAmelCase_ , dim=3))
a_ =torch.cat(lowerCAmelCase_ , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
a_ =sample
a_ =self.encode(lowerCAmelCase_).latent_dist
if sample_posterior:
a_ =posterior.sample(generator=lowerCAmelCase_)
else:
a_ =posterior.mode()
a_ =self.decode(lowerCAmelCase_).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_)
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
"""simple docstring"""
a_ =4
a_ =3_2
a_ =(3_2, 3_2)
a_ =torch.manual_seed(0)
a_ =torch.device(lowerCAmelCase_)
a_ =(batch_size, num_channels) + sizes
a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
a_ ={"hidden_states": hidden_states}
if include_temb:
a_ =1_2_8
a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
a_ =torch.manual_seed(1)
a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a_ =3_2
if self.block_type == "mid":
init_dict.pop("out_channels")
a_ =self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
a_ =unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
self.assertEqual(output.shape , self.output_shape)
a_ =output[0, -1, -3:, -3:]
a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
| 41
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase = logging.get_logger(__name__)
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." , lowerCAmelCase_ , )
super().__init__(args=lowerCAmelCase_ , **lowerCAmelCase_)
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
    '''Print each vertex's shortest distance from the source vertex.'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
    '''Return True if one more relaxation pass still shortens a path, i.e. a negative-weight cycle is reachable.'''
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    '''Compute shortest distances from vertex `src` with Bellman-Ford; raise if a negative-weight cycle is found.'''
a_ =[float("inf" )] * vertex_count
a_ =0.0
for _ in range(vertex_count - 1 ):
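        # |V| - 1 relaxation passes suffice to settle all shortest distances when no negative cycle exists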
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
a_ =distance[u] + w
a_ =check_negative_cycle(lowercase__ , lowercase__ , lowercase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = "yolos"
def __init__( self , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3_0_7_2 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=[5_1_2, 8_6_4] , lowerCAmelCase_=1_6 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=1_0_0 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_attention_heads
a_ =intermediate_size
a_ =hidden_act
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =initializer_range
a_ =layer_norm_eps
a_ =image_size
a_ =patch_size
a_ =num_channels
a_ =qkv_bias
a_ =num_detection_tokens
a_ =use_mid_position_embeddings
a_ =auxiliary_loss
# Hungarian matcher
a_ =class_cost
a_ =bbox_cost
a_ =giou_cost
# Loss coefficients
a_ =bbox_loss_coefficient
a_ =giou_loss_coefficient
a_ =eos_coefficient
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Tuple = version.parse("1.11")
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self) -> float:
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return 1_2
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase = '''path-to-your-trained-model'''
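# hypothetical placeholder: point this at the output directory of your DreamBooth training run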
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=9_9 , lowerCAmelCase_=3_2 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Optional[Any]:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =seq_length
a_ =is_training
a_ =use_input_mask
a_ =use_token_type_ids
a_ =use_labels
a_ =vocab_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_attention_heads
a_ =intermediate_size
a_ =hidden_act
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =type_sequence_label_size
a_ =initializer_range
a_ =num_labels
a_ =num_choices
a_ =scope
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ =None
if self.use_input_mask:
a_ =random_attention_mask([self.batch_size, self.seq_length])
a_ =None
if self.use_token_type_ids:
a_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ =None
a_ =None
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ =ids_tensor([self.batch_size] , self.num_choices)
a_ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self) -> str:
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
        a_ , a_ , a_ , a_ , a_ , a_ , a_ =self.prepare_config_and_inputs()
a_ =True
a_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =NezhaModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
a_ =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> int:
"""simple docstring"""
a_ =True
a_ =NezhaModel(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =NezhaForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =NezhaForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =NezhaForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =NezhaForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =self.num_labels
a_ =NezhaForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =self.num_labels
a_ =NezhaForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =self.num_choices
a_ =NezhaForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
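        # expand each input to (batch_size, num_choices, seq_length) so every choice is scored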
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ , a_ , a_ , a_ =config_and_inputs
a_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : List[Any] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ : List[Any] = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ : List[str] = True
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False) -> Optional[int]:
"""simple docstring"""
a_ =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class in get_values(lowerCAmelCase_):
a_ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_)
a_ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_)
return inputs_dict
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =NezhaModelTester(self)
a_ =ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
        a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ =self.model_tester.prepare_config_and_inputs_for_decoder()
a_ =None
self.model_tester.create_and_check_model_as_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_)
@slow
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ =NezhaModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@slow
@require_torch_gpu
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a_ =True
a_ =model_class(config=lowerCAmelCase_)
a_ =self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
a_ =torch.jit.trace(
lowerCAmelCase_ , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "bert.pt"))
a_ =torch.jit.load(os.path.join(lowerCAmelCase_ , "bert.pt") , map_location=lowerCAmelCase_)
loaded(inputs_dict["input_ids"].to(lowerCAmelCase_) , inputs_dict["attention_mask"].to(lowerCAmelCase_))
@require_torch
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
a_ =torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ =torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_)[0]
a_ =torch.Size((1, 6, 7_6_8))
self.assertEqual(output.shape , lowerCAmelCase_)
a_ =torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4))
@slow
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
a_ =torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ =torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_)[0]
a_ =torch.Size((1, 6, 2_1_1_2_8))
self.assertEqual(output.shape , lowerCAmelCase_)
a_ =torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4))
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowercase = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
lowercase = None
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowercase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowercase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCAmelCase_ ( lowercase__ ):
    '''Map each question id to whether it has at least one gold answer.'''
a_ ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ =bool(qa["answers"]["text"] )
return qid_to_has_ans
def UpperCAmelCase_ ( lowercase__ ):
    '''Lower text and remove punctuation, articles and extra whitespace.'''
def remove_articles(lowercase__ ):
return ARTICLES_REGEX.sub(" " , lowercase__ )
def white_space_fix(lowercase__ ):
return " ".join(text.split() )
def remove_punc(lowercase__ ):
a_ =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase__ ) ) ) )
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if not s:
return []
return normalize_answer(lowercase__ ).split()
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
return int(normalize_answer(lowercase__ ) == normalize_answer(lowercase__ ) )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
    '''Compute the token-level F1 score between a gold answer and a prediction.'''
a_ =get_tokens(lowercase__ )
a_ =get_tokens(lowercase__ )
a_ =collections.Counter(lowercase__ ) & collections.Counter(lowercase__ )
a_ =sum(common.values() )
if len(lowercase__ ) == 0 or len(lowercase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a_ =1.0 * num_same / len(lowercase__ )
a_ =1.0 * num_same / len(lowercase__ )
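    # F1 is the harmonic mean of token-level precision and recall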
a_ =(2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
    '''Compute exact-match and F1 scores for every prediction against its gold answers.'''
a_ ={}
a_ ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a_ =qa["id"]
a_ =[t for t in qa["answers"]["text"] if normalize_answer(lowercase__ )]
if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
a_ =[""]
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
a_ =preds[qid]
# Take max over all gold answers
a_ =max(compute_exact(lowercase__ , lowercase__ ) for a in gold_answers )
a_ =max(compute_fa(lowercase__ , lowercase__ ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ ={}
for qid, s in scores.items():
a_ =na_probs[qid] > na_prob_thresh
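        # predictions whose no-answer probability exceeds the threshold are treated as abstentions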
if pred_na:
a_ =float(not qid_to_has_ans[qid] )
else:
a_ =s
return new_scores
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
if not qid_list:
a_ =len(lowercase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
a_ =len(lowercase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for k in new_eval:
a_ =new_eval[k]
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
plt.step(lowercase__ , lowercase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowercase__ , lowercase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowercase__ )
plt.savefig(lowercase__ )
plt.clf()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
'''simple docstring'''
a_ =sorted(lowercase__ , key=lambda lowercase__ : na_probs[k] )
a_ =0.0
a_ =1.0
a_ =0.0
a_ =[1.0]
a_ =[0.0]
a_ =0.0
for i, qid in enumerate(lowercase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a_ =true_pos / float(i + 1 )
a_ =true_pos / float(lowercase__ )
if i == len(lowercase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase__ )
recalls.append(lowercase__ )
if out_image:
plot_pr_curve(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return {"ap": 100.0 * avg_prec}
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if out_image_dir and not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
a_ =sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a_ =make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
a_ =make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
a_ ={k: float(lowercase__ ) for k, v in qid_to_has_ans.items()}
a_ =make_precision_recall_eval(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , out_image=os.path.join(lowercase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowercase__ , lowercase__ , "pr_exact" )
merge_eval(lowercase__ , lowercase__ , "pr_f1" )
merge_eval(lowercase__ , lowercase__ , "pr_oracle" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if not qid_list:
return
a_ =[na_probs[k] for k in qid_list]
a_ =np.ones_like(lowercase__ ) / float(len(lowercase__ ) )
plt.hist(lowercase__ , weights=lowercase__ , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowercase__ , F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a_ =num_no_ans
a_ =cur_score
a_ =0.0
a_ =sorted(lowercase__ , key=lambda lowercase__ : na_probs[k] )
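    # sweep thresholds in increasing order of no-answer probability; each step treats one more question as answered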
for i, qid in enumerate(lowercase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a_ =scores[qid]
else:
if preds[qid]:
a_ =-1
else:
a_ =0
cur_score += diff
if cur_score > best_score:
a_ =cur_score
a_ =na_probs[qid]
return 100.0 * best_score / len(lowercase__ ), best_thresh
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ , a_ =find_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =best_exact
a_ =exact_thresh
a_ =best_fa
a_ =fa_thresh
def UpperCAmelCase_ ( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
a_ =json.load(lowercase__ )
a_ =dataset_json["data"]
with open(OPTS.pred_file ) as f:
a_ =json.load(lowercase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a_ =json.load(lowercase__ )
else:
a_ ={k: 0.0 for k in preds}
a_ =make_qid_to_has_ans(lowercase__ ) # maps qid to True/False
a_ =[k for k, v in qid_to_has_ans.items() if v]
a_ =[k for k, v in qid_to_has_ans.items() if not v]
a_ , a_ =get_raw_scores(lowercase__ , lowercase__ )
a_ =apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh )
a_ =apply_no_ans_threshold(lowercase__ , lowercase__ , lowercase__ , OPTS.na_prob_thresh )
a_ =make_eval_dict(lowercase__ , lowercase__ )
if has_ans_qids:
a_ =make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ )
merge_eval(lowercase__ , lowercase__ , "HasAns" )
if no_ans_qids:
a_ =make_eval_dict(lowercase__ , lowercase__ , qid_list=lowercase__ )
merge_eval(lowercase__ , lowercase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , OPTS.out_image_dir )
histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowercase__ , lowercase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
else:
print(json.dumps(lowercase__ , indent=2 ) )
if __name__ == "__main__":
lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =json.loads(f.read() )
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ =b
a_ =idx
for wd in b:
a_ =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
"""simple docstring"""
a_ =text.replace(" " , "<SP>")
a_ =text.replace(" " , "<SP>")
a_ =text.replace("\r\n" , "<BR>")
a_ =text.replace("\n" , "<BR>")
a_ =text.replace("\r" , "<BR>")
a_ =text.replace("\t" , "<TAB>")
a_ =text.replace("—" , "ー")
a_ =text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
if clean:
a_ =self.clean_text(lowerCAmelCase_)
def check_simbol(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
a_ =(int(e[0]) << 8) + int(e[1])
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
a_ =0
a_ =[]
while pos < len(lowerCAmelCase_):
a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ =[] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
a_ =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_) > 2:
a_ =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase_) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
result.append(lowerCAmelCase_)
a_ =e
else:
a_ =pos + 1
a_ =text[pos:end]
if check_simbol(lowerCAmelCase_):
result.append("<KIGOU>")
elif checkuae(lowerCAmelCase_):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ =end
return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
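
# --- Added illustrative sketch, not part of the original file ---
# A minimal, self-contained model of the greedy longest-match loop in the
# subword tokenizer above (SubWordJapaneseTokenizer in the upstream source):
# at each position, try the longest vocabulary entry first and fall back to
# shorter ones. The real loop additionally collects all matching candidates
# and keeps the one with the smallest token id; this sketch skips that step.
# The toy vocabulary below is hypothetical.
def _greedy_longest_match(text, vocab, max_len):
    pos, tokens = 0, []
    while pos < len(text):
        for end in range(min(len(text), pos + max_len), pos, -1):
            if text[pos:end] in vocab:  # longest match wins
                tokens.append(text[pos:end])
                pos = end
                break
        else:  # no vocabulary entry matched: emit a single character
            tokens.append(text[pos])
            pos += 1
    return tokens

assert _greedy_longest_match("lowlower", {"low", "lower"}, 5) == ["low", "lower"]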
| 41
| 0
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self) -> List[Any]:
"""simple docstring"""
a_ =[]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
"""simple docstring"""
self.events.append("on_init_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
self.events.append("on_train_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Dict:
"""simple docstring"""
self.events.append("on_train_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
self.events.append("on_epoch_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_epoch_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
self.events.append("on_step_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Any:
"""simple docstring"""
self.events.append("on_step_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_evaluate")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
self.events.append("on_predict")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> str:
"""simple docstring"""
self.events.append("on_save")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
self.events.append("on_log")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_prediction_step")
@require_torch
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =tempfile.mkdtemp()
def lowercase_ ( self) -> Any:
"""simple docstring"""
shutil.rmtree(self.output_dir)
def lowercase_ ( self , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=6_4 , lowerCAmelCase_=6_4 , lowerCAmelCase_=None , lowerCAmelCase_=False , **lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =RegressionDataset(length=lowerCAmelCase_)
a_ =RegressionDataset(length=lowerCAmelCase_)
a_ =RegressionModelConfig(a=lowerCAmelCase_ , b=lowerCAmelCase_)
a_ =RegressionPreTrainedModel(lowerCAmelCase_)
a_ =TrainingArguments(self.output_dir , disable_tqdm=lowerCAmelCase_ , report_to=[] , **lowerCAmelCase_)
return Trainer(
lowerCAmelCase_ , lowerCAmelCase_ , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , callbacks=lowerCAmelCase_ , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
# Order doesn't matter
a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cb.__class__.__name__)
a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cb.__class__.__name__)
for cba, cba in zip(lowerCAmelCase_ , lowerCAmelCase_):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(lowerCAmelCase_ , cba.__class__)
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(cba.__class__ , lowerCAmelCase_)
else:
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =["on_init_end", "on_train_begin"]
a_ =0
a_ =len(trainer.get_eval_dataloader())
a_ =["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("on_epoch_begin")
for _ in range(lowerCAmelCase_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save")
expected_events.append("on_epoch_end")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.get_trainer()
a_ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
# Callbacks passed at init are added to the default callbacks
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
a_ =self.get_trainer(disable_tqdm=lowerCAmelCase_)
a_ =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
a_ =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowerCAmelCase_)
expected_callbacks.remove(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
a_ =self.get_trainer()
a_ =trainer.pop_callback(lowerCAmelCase_)
self.assertEqual(cb.__class__ , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
trainer.add_callback(lowerCAmelCase_)
expected_callbacks.insert(0 , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
# We can also add, pop, or remove by instance
a_ =self.get_trainer()
a_ =trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowerCAmelCase_)
expected_callbacks.remove(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
a_ =self.get_trainer()
a_ =trainer.callback_handler.callbacks[0]
a_ =trainer.pop_callback(lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
trainer.add_callback(lowerCAmelCase_)
expected_callbacks.insert(0 , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
import warnings
        # XXX: for now, ignore scatter_gather warnings in this test since they are not relevant to what is being tested
warnings.simplefilter(action="ignore" , category=lowerCAmelCase_)
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# Independent log/save/eval
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps")
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch")
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# A bit of everything
a_ =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning") as warn_mock:
a_ =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowerCAmelCase_) in warn_mock.call_args[0][0]
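
# --- Added usage sketch, not part of the test file ---
# How a custom callback like the ones exercised above is attached in
# practice. `on_log` receives the metrics dict at every logging step; the
# class name and print format are made up, only the TrainerCallback API
# (already imported at the top of this file) is real.
class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs is not None and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# trainer = Trainer(model, training_args, callbacks=[LossPrinterCallback()])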
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
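
def _permute_layout_demo():
    # Added illustration, not called by the script: TF stores conv kernels as
    # (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W), which is
    # exactly what the permute(3, 2, 0, 1) in replace_params above performs.
    # The 3x3x32x64 shape is an arbitrary example.
    tf_kernel = np.random.rand(3, 3, 32, 64)
    pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
    assert tuple(pt_weight.shape) == (64, 32, 3, 3)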
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "trocr"
__magic_name__ : Optional[int] = ["past_key_values"]
__magic_name__ : List[Any] = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self , lowerCAmelCase_=5_0_2_6_5 , lowerCAmelCase_=1_0_2_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_="gelu" , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ) -> Union[str, Any]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =decoder_layers
a_ =decoder_attention_heads
a_ =decoder_ffn_dim
a_ =activation_function
a_ =max_position_embeddings
a_ =dropout
a_ =attention_dropout
a_ =activation_dropout
a_ =init_std
a_ =decoder_layerdrop
a_ =use_cache
a_ =scale_embedding
a_ =use_learned_position_embeddings
a_ =layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ , a_ =0, 1
while True:
a_ , a_ =b, a + b
yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
'''simple docstring'''
a_ =1
a_ =fibonacci_generator()
while len(str(next(lowercase__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
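    # Added check: 144 is the first Fibonacci number with three digits and is
    # the twelfth term of the sequence, so solution(3) should return 12.
    assert solution(3) == 12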
| 41
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "switch_transformers"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , lowerCAmelCase_=3_2_1_2_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=False , lowerCAmelCase_=0.0_1 , lowerCAmelCase_="float32" , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1e-6 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=1.0 , lowerCAmelCase_="relu" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =d_kv
a_ =d_ff
a_ =num_sparse_encoder_layers
a_ =num_layers
a_ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ =num_sparse_decoder_layers
        # Determines how often (i.e. every how many layers) the encoder gets a sparse layer.
if self.num_sparse_encoder_layers > 0:
a_ =self.num_layers // self.num_sparse_encoder_layers
else:
a_ =self.num_layers # HACK: this will create 0 sparse layers
        # Determines how often (i.e. every how many layers) the decoder gets a sparse layer.
if self.num_sparse_decoder_layers > 0:
a_ =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ =self.num_decoder_layers # HACK: this will create 0 sparse layers
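        # Added example: with num_layers = 12 and num_sparse_encoder_layers = 3,
        # encoder_sparse_step = 4, i.e. roughly every fourth encoder block is a
        # sparse (mixture-of-experts) block; the decoder is handled the same way.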
a_ =num_heads
a_ =num_experts
a_ =expert_capacity
a_ =router_bias
a_ =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
a_ =router_dtype
a_ =router_ignore_padding_tokens
a_ =relative_attention_num_buckets
a_ =relative_attention_max_distance
a_ =dropout_rate
a_ =layer_norm_epsilon
a_ =initializer_factor
a_ =feed_forward_proj
a_ =use_cache
a_ =add_router_probs
a_ =router_z_loss_coef
a_ =router_aux_loss_coef
a_ =self.feed_forward_proj.split("-")
a_ =act_info[-1]
a_ =act_info[0] == "gated"
if len(lowerCAmelCase_) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ ="gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
a_ =hex_num[0] == "-"
if is_negative:
a_ =hex_num[1:]
try:
a_ =int(lowercase__ , 1_6 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
a_ =""
while int_num > 0:
a_ =str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
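    # Added examples (hex_to_bin is the converter defined above):
    # int("AC", 16) == 172 == 0b10101100, and the binary digits come back as a
    # plain integer, so hex_to_bin("AC") == 10101100 and hex_to_bin("-1") == -1.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-1") == -1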
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={}
a_ =os.path.join(lowercase__ , "all_results.json" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , "r" ) as f:
a_ =json.load(lowercase__ )
else:
raise ValueError(F"""can't find {path}""" )
return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
a_ =self.get_auto_remove_tmp_dir()
a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ = 5_0 ):
'''simple docstring'''
a_ =[1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
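    # Added known value: a row of length five admits exactly fifteen fillings
    # with unit squares plus tiles of length two to four, so solution(5) == 15.
    assert solution(5) == 15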
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class UpperCAmelCase ( __a):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
a_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =0
a_ =len(lowercase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowercase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) <= 1:
return arr, 0
a_ =len(lowercase__ ) // 2
a_ =arr[0:mid]
a_ =arr[mid:]
a_ , a_ =count_inversions_recursive(lowercase__ )
a_ , a_ =count_inversions_recursive(lowercase__ )
a_ , a_ =_count_cross_inversions(lowercase__ , lowercase__ )
a_ =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[]
a_ =a_ =a_ =0
while i < len(lowercase__ ) and j < len(lowercase__ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P),
            # since P is sorted; every such pair is a cross inversion.
num_inversion += len(lowercase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =[1_0, 2, 1, 5, 5, 2, 1_1]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
a_ =count_inversions_bf(lowercase__ )
a_ , a_ =count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowercase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
a_ =count_inversions_bf(lowercase__ )
a_ , a_ =count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowercase__ )
# an empty list should also have zero inversions
a_ =[]
a_ =count_inversions_bf(lowercase__ )
a_ , a_ =count_inversions_recursive(lowercase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowercase__ )
if __name__ == "__main__":
main()
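    # Added micro-example: merging P = [5, 10] with Q = [1, 6] yields the three
    # cross inversions (5, 1), (10, 1) and (10, 6), which is what the
    # "num_inversion += len(P) - i" jump in the merge step above counts.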
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(lowercase__ , ans + num , lowercase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
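    # Added examples: with a (not necessarily contiguous) subsequence the best
    # strategy keeps every positive number, so [1, 2, 3, 4, -2] yields 10; for
    # an all-negative input the single largest element wins, so [-10, -2] yields -2.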
| 41
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Tuple = GPTaTokenizer
__magic_name__ : Optional[int] = GPTaTokenizerFast
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[Any] = {"add_prefix_space": True}
__magic_name__ : List[str] = False
def lowercase_ ( self) -> Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
a_ =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
a_ =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a_ ={"unk_token": "<unk>"}
a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowerCAmelCase_))
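        # Added note: with the toy merges above, "lower" (with a leading
        # space marker) tokenizes to ["\u0120low", "er"], via the merge chain
        # "\u0120 l" -> "\u0120l", "\u0120l o" -> "\u0120lo",
        # "\u0120lo w" -> "\u0120low" and "e r" -> "er".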
def lowercase_ ( self , **lowerCAmelCase_) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_)
def lowercase_ ( self , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ ="lower newer"
a_ ="lower newer"
return input_text, output_text
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a_ ="lower newer"
a_ =["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
a_ =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =tokens + [tokenizer.unk_token]
a_ =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a_ =self.get_tokenizer()
a_ =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_)
a_ ="lower newer"
# Testing tokenization
a_ =tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_)
a_ =rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
# Testing conversion to ids without special tokens
a_ =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
# Testing conversion to ids with special tokens
a_ =self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_)
a_ =tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
# Testing the unknown token
a_ =tokens + [rust_tokenizer.unk_token]
a_ =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
def lowercase_ ( self , *lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
pass
def lowercase_ ( self , lowerCAmelCase_=1_5) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a_ =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
# Simple input
a_ ="This is a simple input"
a_ =["This is a simple input 1", "This is a simple input 2"]
a_ =("This is a simple input", "This is a pair")
a_ =[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length")
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length")
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length")
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length")
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" , )
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")
# Simple input
a_ ="This is a simple input"
a_ =["This is a simple input looooooooong", "This is a simple input"]
a_ =("This is a simple input", "This is a pair")
a_ =[
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
a_ =tokenizer.pad_token_id
a_ =tokenizer(lowerCAmelCase_ , padding="max_length" , max_length=3_0 , return_tensors="np")
a_ =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="np")
a_ =tokenizer(*lowerCAmelCase_ , padding="max_length" , max_length=6_0 , return_tensors="np")
a_ =tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 3_0)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 6_0)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ ="$$$"
a_ =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_)
a_ ="This is a simple input"
a_ =["This is a simple input 1", "This is a simple input 2"]
a_ =tokenizer.bos_token_id
a_ =tokenizer(lowerCAmelCase_)
a_ =tokenizer(lowerCAmelCase_)
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
a_ =tokenizer.decode(out_s.input_ids)
a_ =tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , lowerCAmelCase_)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
pass
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =[self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_)]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}"""):
a_ ="Encode this."
a_ ="This one too please."
a_ =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer.encode_plus(
lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , )
a_ =encoded_sequence_dict["input_ids"]
a_ =encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
a_ =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_)
]
a_ =[x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase_)
a_ ="A photo of a cat"
a_ =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
tokenizer.save_pretrained("test_opt")
a_ =AutoTokenizer.from_pretrained("./test_opt")
a_ =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=lowerCAmelCase_)
a_ ="A photo of a cat"
a_ =tokenizer.encode(
lowerCAmelCase_ , )
# Same as above
self.assertEqual(lowerCAmelCase_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
@unittest.skip("This test is failing because of a bug in the fast tokenizer")
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase_)
a_ ="bos"
a_ =tokenizer.get_vocab()["bos"]
a_ ="A photo of a cat"
a_ =tokenizer.encode(
lowerCAmelCase_ , )
# We changed the bos token
self.assertEqual(lowerCAmelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
tokenizer.save_pretrained("./tok")
a_ =AutoTokenizer.from_pretrained("./tok")
self.assertTrue(tokenizer.is_fast)
a_ =tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
'''simple docstring'''
a_ =0
a_ =0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
a_ , a_ =list(map(lowercase__ , line.split("," ) ) )
if x * logaa(lowercase__ ) > largest:
a_ =x * logaa(lowercase__ )
a_ =i + 1
return result
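
# Added note: computing base**exponent for every line would build
# astronomically large integers; log10 is monotonic, so comparing
# exponent * log10(base) in floating point ranks the lines identically,
# which is what the loop above relies on.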
if __name__ == "__main__":
print(solution())
| 41
| 0
|
import math
def UpperCAmelCase_ ( lowercase__ , lowercase__ = 0 , lowercase__ = 0 ):
'''simple docstring'''
a_ =end or len(lowercase__ )
for i in range(lowercase__ , lowercase__ ):
a_ =i
a_ =array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
a_ =array[temp_index - 1]
temp_index -= 1
a_ =temp_index_value
return array
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ): # Max Heap
'''simple docstring'''
a_ =index
a_ =2 * index + 1 # Left Node
a_ =2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
a_ =left_index
if right_index < heap_size and array[largest] < array[right_index]:
a_ =right_index
if largest != index:
a_ , a_ =array[largest], array[index]
heapify(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =len(lowercase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowercase__ , lowercase__ , lowercase__ )
for i in range(n - 1 , 0 , -1 ):
a_ , a_ =array[0], array[i]
heapify(lowercase__ , 0 , lowercase__ )
return array
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =low
a_ =high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
a_ , a_ =array[j], array[i]
i += 1
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return array
a_ =2 * math.ceil(math.loga(len(lowercase__ ) ) )
a_ =1_6
return intro_sort(lowercase__ , 0 , len(lowercase__ ) , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowercase__ )
max_depth -= 1
a_ =median_of_a(lowercase__ , lowercase__ , start + ((end - start) // 2) + 1 , end - 1 )
a_ =partition(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
intro_sort(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =p
return insertion_sort(lowercase__ , lowercase__ , lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = input('''Enter numbers separated by a comma : ''').strip()
lowercase = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
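    # Added sanity check: sort (as invoked above) should agree with the
    # built-in sorted() on a small shuffled list.
    assert sort([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]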
| 714
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((a_) , (a_)) =extended_euclid(lowercase__ , a % b )
a_ =a // b
return (y, x - k * y)
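
# Added worked example: extended_euclid(10, 6) returns (-1, 2), and indeed
# 10 * (-1) + 6 * 2 == 2 == gcd(10, 6).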
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
a_ =(b % n + n) % n
return b
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
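    # Added worked example: chinese_remainder_theorem(5, 1, 7, 3) == 31, the
    # unique value modulo 35 satisfying 31 % 5 == 1 and 31 % 7 == 3.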
| 41
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(lowercase__ , ans + num , lowercase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
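# Rayleigh quotient (v* A v) / (v* v); for a Hermitian matrix A the quotient
# is always real and lies between the smallest and largest eigenvalues.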
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =v.conjugate().T
a_ =v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 41
| 0
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase :
'''simple docstring'''
def __init__( self) -> None:
"""simple docstring"""
a_ =[2, 1, 2, -1]
a_ =[1, 2, 3, 4]
def lowercase_ ( self) -> list[float]:
"""simple docstring"""
a_ =len(self.first_signal)
a_ =len(self.second_signal)
a_ =max(lowerCAmelCase_ , lowerCAmelCase_)
# create a zero matrix of max_length x max_length
a_ =[[0] * max_length for i in range(lowerCAmelCase_)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCAmelCase_):
a_ =deque(self.second_signal)
rotated_signal.rotate(lowerCAmelCase_)
for j, item in enumerate(lowerCAmelCase_):
matrix[i][j] += item
# multiply the matrix with the first signal
a_ =np.matmul(np.transpose(lowerCAmelCase_) , np.transpose(self.first_signal))
# rounding-off to two decimal places
return [round(lowerCAmelCase_ , 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , len(lowercase__ ) ) ):
if board[i][j] == 1:
return False
return True
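# Backtracking solver: tries every column of the current row, recurses on the
# next row and resets the cell afterwards so that every solution is explored.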
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if row >= len(lowercase__ ):
solution.append(lowercase__ )
printboard(lowercase__ )
print()
return True
for i in range(len(lowercase__ ) ):
if is_safe(lowercase__ , lowercase__ , lowercase__ ):
a_ =1
solve(lowercase__ , row + 1 )
a_ =0
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in range(len(lowercase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is :''', len(solution))
| 41
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
a_ ="segformer.encoder." + key
if key.startswith("backbone" ):
a_ =key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
a_ =key[key.find("patch_embed" ) + len("patch_embed" )]
a_ =key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowercase__ )-1}""" )
if "norm" in key:
a_ =key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
a_ =key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
a_ =key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowercase__ )-1}""" )
if "layer_norm1" in key:
a_ =key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
a_ =key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
a_ =key[key.find("block" ) + len("block" )]
a_ =key.replace(F"""block{idx}""" , F"""block.{int(lowercase__ )-1}""" )
if "attn.q" in key:
a_ =key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
a_ =key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
a_ =key.replace("attn" , "attention.self" )
if "fc1" in key:
a_ =key.replace("fc1" , "dense1" )
if "fc2" in key:
a_ =key.replace("fc2" , "dense2" )
if "linear_pred" in key:
a_ =key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
a_ =key.replace("linear_fuse.conv" , "linear_fuse" )
a_ =key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
a_ =key[key.find("linear_c" ) + len("linear_c" )]
a_ =key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowercase__ )-1}""" )
if key.startswith("head" ):
a_ =key.replace("head" , "classifier" )
a_ =value
return new_state_dict
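# The original implementation stores the key and value projections as one
# fused "kv" matrix per attention block; split it into the separate key and
# value weights/biases expected by the HF model.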
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
a_ =state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
a_ =state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
a_ =kv_weight[
: config.hidden_sizes[i], :
]
a_ =kv_bias[: config.hidden_sizes[i]]
a_ =kv_weight[
config.hidden_sizes[i] :, :
]
a_ =kv_bias[
config.hidden_sizes[i] :
]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
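# Conversion flow: build a SegformerConfig from the model name, rename the
# original checkpoint keys to the Hugging Face layout, split the fused
# key/value projections, then verify a forward pass against hard-coded logit
# slices before saving.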
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =SegformerConfig()
a_ =False
# set attributes based on model_name
a_ ="huggingface/label-files"
if "segformer" in model_name:
a_ =model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
a_ =1_5_0
a_ ="ade20k-id2label.json"
a_ =(1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
a_ =1_9
a_ ="cityscapes-id2label.json"
a_ =(1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
a_ =True
a_ =model_name[4:6]
a_ =1_0_0_0
a_ ="imagenet-1k-id2label.json"
a_ =(1, 1_0_0_0)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =2_5_6
elif size == "b2":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 4, 6, 3]
elif size == "b3":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 4, 1_8, 3]
elif size == "b4":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 8, 2_7, 3]
elif size == "b5":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 6, 4_0, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
a_ =SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowercase__ , align=lowercase__ , do_random_crop=lowercase__ )
# prepare image
a_ =prepare_img()
a_ =image_processor(images=lowercase__ , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
a_ =torch.load(lowercase__ , map_location=torch.device("cpu" ) )
else:
a_ =torch.load(lowercase__ , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
a_ =rename_keys(lowercase__ , encoder_only=lowercase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
if encoder_only:
a_ =False
a_ =SegformerForImageClassification(lowercase__ )
else:
a_ =SegformerForSemanticSegmentation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
a_ =model(lowercase__ )
a_ =outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
a_ =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
a_ =torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
a_ =torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
a_ =torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
a_ =torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
a_ =torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
a_ =torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
a_ =torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
a_ =torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
a_ =logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
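# Fills a single <mask> token: runs the masked-LM head, takes the top-k token
# probabilities at the masked position and splices each candidate back into
# the input string.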
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowercase = None
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
lowercase = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
lowercase = '''▁'''
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : int = BigBirdTokenizer
__magic_name__ : List[Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_="[CLS]" , **lowerCAmelCase_ , ) -> int:
"""simple docstring"""
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else bos_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else eos_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else unk_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else pad_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cls_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
a_ =vocab_file
a_ =False if not self.vocab_file else True
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_)) + [1]
return [1] + ([0] * len(lowerCAmelCase_)) + [1] + ([0] * len(lowerCAmelCase_)) + [1]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(lowerCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_):
copyfile(self.vocab_file , lowerCAmelCase_)
return (out_vocab_file,)
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowercase = {
'''yjernite/retribert-base-uncased''': 512,
}
lowercase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[Any] = VOCAB_FILES_NAMES
__magic_name__ : int = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__magic_name__ : Dict = RetriBertTokenizer
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
a_ =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowerCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase_) != tokenize_chinese_chars
):
a_ =getattr(lowerCAmelCase_ , normalizer_state.pop("type"))
a_ =do_lower_case
a_ =strip_accents
a_ =tokenize_chinese_chars
a_ =normalizer_class(**lowerCAmelCase_)
a_ =do_lower_case
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=None) -> List[str]:
"""simple docstring"""
a_ =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(lowerCAmelCase_)
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =os.path.dirname(os.path.realpath(lowercase__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
for word in [sum(ord(lowercase__ ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
| 41
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = TextToVideoSDPipeline
__magic_name__ : int = TEXT_TO_IMAGE_PARAMS
__magic_name__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__magic_name__ : List[Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def lowercase_ ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0)
a_ =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
a_ =CLIPTextModel(lowerCAmelCase_)
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
a_ ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> Union[str, Any]:
"""simple docstring"""
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
a_ ={
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ ="cpu" # ensure determinism for the device-dependent torch.Generator
a_ =self.get_dummy_components()
a_ =TextToVideoSDPipeline(**lowerCAmelCase_)
a_ =sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_)
a_ ="np"
a_ =sd_pipe(**lowerCAmelCase_).frames
a_ =frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ =np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=1e-2)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def lowercase_ ( self) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def lowercase_ ( self) -> str:
"""simple docstring"""
pass
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
a_ =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
a_ =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
a_ =pipe.to("cuda")
a_ ="Spiderman is surfing"
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ =pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2_5 , output_type="pt").frames
a_ =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
a_ =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
a_ =pipe.to("cuda")
a_ ="Spiderman is surfing"
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ =pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="pt").frames
a_ =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
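# Fetches a checkpoint file from the Hub into the local Bark cache directory.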
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
    # this is a hack: some checkpoints only store a single "vocab_size", which is reused for both the input and output vocab sizes
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
    # fixup checkpoint: strip the "_orig_mod." prefix (left by torch.compile) and map the remaining GPT-style names onto the HF layer names
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
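# Converts one Bark sub-model and sanity-checks its logits on random input
# against the original suno/bark implementation before saving.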
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
    # any remaining output difference should come from the differing self-attention implementation designs
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
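# Stitches the three converted sub-models and the EnCodec codec into a single
# BarkModel, then saves it (optionally pushing to the Hub).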
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import os
from pathlib import Path
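# JIT-compiles the MultiScaleDeformableAttention C++/CUDA kernels with
# torch.utils.cpp_extension.load and returns the compiled extension module.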
def UpperCAmelCase_ ( ):
'''simple docstring'''
from torch.utils.cpp_extension import load
a_ =Path(lowercase__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
a_ =[
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , lowercase__ , with_cuda=lowercase__ , extra_include_paths=[str(lowercase__ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
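    # For a 4-digit n (5000-9999) the concatenated product of n with (1, 2) is
    # n * 10**5 + 2 * n = 100002 * n, and for a 3-digit n (100-333) the
    # concatenated product of n with (1, 2, 3) is 1002003 * n, so scanning
    # downwards yields the largest 9-pandigital result first.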
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 0
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Tuple = logging.get_logger(__name__)
def snake_case ( UpperCamelCase__ : Dict ) -> Any:
print("""Loading config file...""" )
def flatten_yaml_as_dict(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]="" , UpperCamelCase__ : List[str]="." ):
lowerCamelCase : List[Any] = []
for k, v in d.items():
lowerCamelCase : int = parent_key + sep + k if parent_key else k
if isinstance(UpperCamelCase__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCamelCase__ , UpperCamelCase__ , sep=UpperCamelCase__ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCamelCase__ )
lowerCamelCase : List[str] = argparse.Namespace()
with open(UpperCamelCase__ , """r""" ) as yaml_file:
try:
lowerCamelCase : str = yaml.load(UpperCamelCase__ , Loader=yaml.FullLoader )
lowerCamelCase : Optional[int] = flatten_yaml_as_dict(UpperCamelCase__ )
for k, v in flat_cfg.items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase__ , str(UpperCamelCase__ ) ) )
return config
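# Derives the MobileViTV2 config (label count, image size, segmentation-head
# settings) from the task name and the flattened original YAML config.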
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> Optional[Any]:
lowerCamelCase : Optional[int] = MobileViTVaConfig()
lowerCamelCase : Union[str, Any] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
lowerCamelCase : List[Any] = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase : Any = 384
else:
lowerCamelCase : str = 256
lowerCamelCase : Tuple = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowerCamelCase : Optional[Any] = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase : Dict = 384
else:
lowerCamelCase : Tuple = 256
lowerCamelCase : Dict = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowerCamelCase : List[str] = 151
lowerCamelCase : Dict = 512
lowerCamelCase : Optional[Any] = """ade20k-id2label.json"""
lowerCamelCase : Tuple = True
elif task_name.startswith("""voc_""" ):
lowerCamelCase : List[Any] = 21
lowerCamelCase : Union[str, Any] = 512
lowerCamelCase : str = """pascal-voc-id2label.json"""
lowerCamelCase : str = True
# orig_config
lowerCamelCase : Optional[Any] = load_orig_config_file(UpperCamelCase__ )
assert getattr(UpperCamelCase__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase : str = getattr(UpperCamelCase__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(UpperCamelCase__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase : str = getattr(UpperCamelCase__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase : Tuple = getattr(UpperCamelCase__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowerCamelCase : int = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowerCamelCase : Any = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
lowerCamelCase : str = getattr(UpperCamelCase__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
lowerCamelCase : Optional[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Dict = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : Union[str, Any] = idalabel
lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> Tuple:
lowerCamelCase : Any = dct.pop(UpperCamelCase__ )
lowerCamelCase : List[str] = val
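# Builds the (old_key, new_key) pairs that map the original MobileViTV2
# checkpoint layout onto the Hugging Face module names.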
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=False ) -> int:
if base_model:
lowerCamelCase : Any = """"""
else:
lowerCamelCase : Dict = """mobilevitv2."""
lowerCamelCase : List[str] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase : List[Any] = k[8:]
else:
lowerCamelCase : Optional[Any] = k
if ".block." in k:
lowerCamelCase : Dict = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
lowerCamelCase : int = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
lowerCamelCase : Union[str, Any] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
lowerCamelCase : Optional[Any] = k_new.replace("""conv_1.""" , F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
lowerCamelCase : Tuple = k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
lowerCamelCase : Optional[int] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
lowerCamelCase : int = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
lowerCamelCase : List[str] = k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
lowerCamelCase : Dict = k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
lowerCamelCase : List[str] = k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase : Optional[int] = [0, 1]
elif i == 4:
lowerCamelCase : Dict = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase : Dict = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
lowerCamelCase : int = k_new.replace(
F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
lowerCamelCase : Optional[Any] = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
lowerCamelCase : Dict = k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
lowerCamelCase : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
lowerCamelCase : Optional[Any] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
lowerCamelCase : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
lowerCamelCase : List[str] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
lowerCamelCase : Optional[int] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
lowerCamelCase : Optional[int] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
lowerCamelCase : Dict = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
lowerCamelCase : str = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
lowerCamelCase : Any = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def snake_case ( UpperCamelCase__ : Tuple ) -> Optional[Any]:
lowerCamelCase : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(UpperCamelCase__ )
for k in keys_to_ignore:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( ) -> int:
lowerCamelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase : List[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ) -> int:
lowerCamelCase : Any = get_mobilevitva_config(UpperCamelCase__ , UpperCamelCase__ )
# load original state_dict
lowerCamelCase : Any = torch.load(UpperCamelCase__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowerCamelCase : List[str] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ).eval()
lowerCamelCase : List[Any] = False
else:
lowerCamelCase : int = MobileViTVaForImageClassification(UpperCamelCase__ ).eval()
lowerCamelCase : Dict = False
    # remove and rename some keys of the loaded original model
lowerCamelCase : List[str] = checkpoint
remove_unused_keys(UpperCamelCase__ )
lowerCamelCase : List[Any] = create_rename_keys(UpperCamelCase__ , base_model=UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load modified state_dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCamelCase : Tuple = model(**UpperCamelCase__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowerCamelCase : Tuple = outputs.logits
lowerCamelCase : int = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase : Optional[Any] = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase :Any = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 42
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 42
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
class A__ ( SequenceFeatureExtractor):
"""simple docstring"""
snake_case__ : List[str] =['''input_features''', '''is_longer''']
def __init__( self: Optional[Any] , __a: str=64 , __a: List[str]=48_000 , __a: List[Any]=480 , __a: Dict=10 , __a: List[Any]=1_024 , __a: str=0.0 , __a: int=False , __a: float = 0 , __a: float = 14_000 , __a: int = None , __a: str = "fusion" , __a: str = "repeatpad" , **__a: Union[str, Any] , )-> Dict:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
lowerCamelCase : List[Any] = top_db
lowerCamelCase : Optional[int] = truncation
lowerCamelCase : Any = padding
lowerCamelCase : Dict = fft_window_size
lowerCamelCase : Tuple = (fft_window_size >> 1) + 1
lowerCamelCase : Dict = hop_length
lowerCamelCase : Tuple = max_length_s
lowerCamelCase : Any = max_length_s * sampling_rate
lowerCamelCase : Optional[int] = sampling_rate
lowerCamelCase : str = frequency_min
lowerCamelCase : Optional[int] = frequency_max
lowerCamelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm=__a , mel_scale="""htk""" , )
lowerCamelCase : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm="""slaney""" , mel_scale="""slaney""" , )
def a__ ( self: Any )-> Dict[str, Any]:
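        # The two mel filter banks are large and fully determined by the constructor
        # arguments, so they are dropped from the serialized dict; __init__ rebuilds them.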
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None )-> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="""dB""" , )
        return log_mel_spectrogram.T
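    # Helper for the "fusion" truncation mode: picks one random chunk_frames-long window from
    # each third of the mel spectrogram and stacks them with a bilinearly downsampled copy of
    # the full spectrogram, yielding a (4, chunk_frames, num_mel_bins) array.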
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
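    # Dispatches between the truncation/padding strategies: long inputs are either randomly
    # cropped ("rand_trunc") or turned into a 4-channel fusion input ("fusion"); short inputs
    # are tiled ("repeat"/"repeatpad") and zero-padded before a single mel spectrogram is
    # extracted. Returns the mel features plus a flag marking whether the audio was long.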
    def _get_input_mel( self , waveform: np.array , max_length , truncation , padding )-> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
def __call__( self: List[Any] , __a: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a: str = None , __a: Optional[str] = None , __a: Optional[int] = None , __a: Optional[int] = None , __a: Optional[Union[str, TensorType]] = None , **__a: str , )-> BatchFeature:
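        # Flow: resolve truncation/padding defaults, validate the sampling rate, coerce
        # `raw_speech` into a batch of float arrays, extract one (possibly fused) mel
        # spectrogram per waveform, and return everything as a BatchFeature.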
lowerCamelCase : Optional[Any] = truncation if truncation is not None else self.truncation
lowerCamelCase : str = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase : int = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase : str = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase : Tuple = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
lowerCamelCase : Optional[Any] = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase : List[Any] = [np.asarray(__a )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCamelCase : Optional[Any] = [
self._get_input_mel(__a , max_length if max_length else self.nb_max_samples , __a , __a )
for waveform in raw_speech
]
lowerCamelCase : Optional[int] = []
lowerCamelCase : Union[str, Any] = []
for mel, longer in padded_inputs:
input_mel.append(__a )
is_longer.append(__a )
if truncation == "fusion" and sum(__a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCamelCase : Optional[Any] = np.random.randint(0 , len(__a ) )
lowerCamelCase : Union[str, Any] = True
if isinstance(input_mel[0] , __a ):
lowerCamelCase : List[str] = [np.asarray(__a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCamelCase : Tuple = [[longer] for longer in is_longer]
lowerCamelCase : Union[str, Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
lowerCamelCase : Any = BatchFeature(__a )
if return_tensors is not None:
lowerCamelCase : str = input_features.convert_to_tensors(__a )
return input_features
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCamelCase :Any = logging.get_logger(__name__)
__lowerCamelCase :Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase :Union[str, Any] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
__lowerCamelCase :Dict = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
__lowerCamelCase :Dict = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class A__ ( PreTrainedTokenizerFast):
"""simple docstring"""
snake_case__ : Optional[int] =VOCAB_FILES_NAMES
snake_case__ : Tuple =PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Dict =PRETRAINED_INIT_CONFIGURATION
snake_case__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[Any] =RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
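    # The method below encodes a batch of candidate texts with MAX_LENGTH padding so every
    # example keeps a rectangular (num_candidates, max_length) shape; `text_pair` and
    # `return_tensors` are pulled out of kwargs and re-applied per candidate / on the final
    # BatchEncoding.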
    def batch_encode_candidates( self , text , **kwargs ):
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("""text_pair""" , None )
        return_tensors = kwargs.pop("""return_tensors""" , None )
        output_data = {
            """input_ids""": [],
            """attention_mask""": [],
            """token_type_ids""": [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get("""input_ids""" )
            encoded_attention_mask = encoded_candidates.get("""attention_mask""" )
            encoded_token_type_ids = encoded_candidates.get("""token_type_ids""" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None )-> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 42
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
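        # reshaped_hidden_states come back as (batch, hidden_size, height, width); flatten the
        # spatial dims back to (batch, seq_len, hidden_size) to compare with the check above.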
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( BackboneTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
        self.model_tester = FocalNetModelTester(self )
| 42
| 1
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class A__ :
"""simple docstring"""
def __init__( self: Tuple )-> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def a__ ( self: Any )-> list[float]:
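        # Circular convolution via a circulant matrix: row i of the matrix is second_signal
        # rotated by i, so matrix.T @ first_signal is the circular convolution of the two
        # signals. With the default signals above this returns [10, 10, 6, 14].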
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 42
|
"""simple docstring"""
import os
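# Project Euler problem 11: find the greatest product of four adjacent numbers (to the
# right, downwards, or along either diagonal) in the 20x20 grid stored in grid.txt.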
def solution() -> int:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
        maximum = 0
        # right
        for i in range(20 ):
            for j in range(17 ):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17 ):
            for j in range(20 ):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17 ):
            for j in range(17 ):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17 ):
            for j in range(3 , 20 ):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase :Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
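    # Note: token_indices [2, 5] point at the "cat" and "frog" tokens of the prompt;
    # Attend-and-Excite maximizes their cross-attention maps during the first
    # max_iter_to_alter denoising steps.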
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase):
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Any , __a: bool = True , __a: Dict[str, int] = None , __a: int = 32 , __a: bool = True , __a: Union[int, float] = 1 / 255 , __a: bool = True , __a: bool = True , __a: Optional[Union[float, List[float]]] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , __a: Optional[Union[float, List[float]]] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , __a: bool = True , __a: Optional[int]=7 , __a: Any=30 , __a: Optional[Any]=400 , __a: int=3 , )-> int:
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : List[str] = do_resize
lowerCamelCase : str = size if size is not None else {"""shortest_edge""": 288}
lowerCamelCase : Optional[int] = size_divisor
lowerCamelCase : int = do_rescale
lowerCamelCase : str = rescale_factor
lowerCamelCase : str = do_normalize
lowerCamelCase : List[str] = do_center_crop
lowerCamelCase : List[Any] = image_mean
lowerCamelCase : Dict = image_std
lowerCamelCase : int = do_pad
lowerCamelCase : Dict = batch_size
lowerCamelCase : Dict = num_channels
lowerCamelCase : List[str] = min_resolution
lowerCamelCase : str = max_resolution
def a__ ( self: List[Any] )-> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def a__ ( self: List[str] , __a: Tuple , __a: List[str]=False )-> Optional[int]:
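        # Mirrors the processor's shortest-edge resize: scale so the short side equals
        # `size`, cap the long side at (1333 / 800) * size, then round both dimensions down
        # to a multiple of size_divisor.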
if not batched:
lowerCamelCase : List[str] = self.size["""shortest_edge"""]
lowerCamelCase : Optional[int] = image_inputs[0]
if isinstance(__a , Image.Image ):
lowerCamelCase , lowerCamelCase : Dict = image.size
else:
lowerCamelCase , lowerCamelCase : str = image.shape[1], image.shape[2]
lowerCamelCase : Dict = size / min(__a , __a )
if h < w:
lowerCamelCase , lowerCamelCase : List[Any] = size, scale * w
else:
lowerCamelCase , lowerCamelCase : int = scale * h, size
lowerCamelCase : Any = int((1_333 / 800) * size )
if max(__a , __a ) > max_size:
lowerCamelCase : Union[str, Any] = max_size / max(__a , __a )
lowerCamelCase : Any = newh * scale
lowerCamelCase : Dict = neww * scale
lowerCamelCase , lowerCamelCase : Union[str, Any] = int(newh + 0.5 ), int(neww + 0.5 )
lowerCamelCase , lowerCamelCase : Optional[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowerCamelCase : List[str] = []
for image in image_inputs:
lowerCamelCase , lowerCamelCase : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase : List[Any] = max(__a , key=lambda __a : item[0] )[0]
lowerCamelCase : str = max(__a , key=lambda __a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =BridgeTowerImageProcessor if is_vision_available() else None
def a__ ( self: Any )-> Dict:
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
def a__ ( self: List[str] )-> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self: Optional[int] )-> Dict:
lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , """image_mean""" ) )
self.assertTrue(hasattr(__a , """image_std""" ) )
self.assertTrue(hasattr(__a , """do_normalize""" ) )
self.assertTrue(hasattr(__a , """do_resize""" ) )
self.assertTrue(hasattr(__a , """size""" ) )
self.assertTrue(hasattr(__a , """size_divisor""" ) )
def a__ ( self: Union[str, Any] )-> Tuple:
pass
def a__ ( self: List[Any] )-> Any:
# Initialize image processor
lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : str = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : Dict = image_processing(__a , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
# Initialize image processor
lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : List[str] = image_processing(__a , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : int = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self: Optional[Any] )-> str:
# Initialize image processor
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Dict = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : Optional[int] = image_processing(__a , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Any = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
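# The convention verified above (shared with RoBERTa): non-pad tokens are numbered
# padding_idx + 1, padding_idx + 2, ... while pad positions keep padding_idx itself.
# As a sketch, the whole computation reduces to:
#   mask = input_ids.ne(padding_idx).int()
#   position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx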
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
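# A minimal usage sketch (illustrative; assumes the real "facebook/rag-token-base"
# checkpoint is reachable, and `generated_ids` stands in for a model's generate() output):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)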
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
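        # i.e. single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP],
        # the standard BERT/ALBERT special-token layout.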
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Tuple , __a: Optional[Any]=13 , __a: List[str]=7 , __a: Union[str, Any]=True , __a: List[Any]=True , __a: Any=True , __a: List[str]=True , __a: Any=99 , __a: Tuple=32 , __a: List[Any]=5 , __a: Dict=4 , __a: Union[str, Any]=37 , __a: Dict="gelu" , __a: Union[str, Any]=0.1 , __a: Tuple=0.1 , __a: str=128 , __a: int=32 , __a: Union[str, Any]=16 , __a: Dict=2 , __a: Union[str, Any]=0.02 , __a: str=3 , __a: Optional[Any]=4 , __a: List[Any]=None , )-> Any:
lowerCamelCase : List[Any] = parent
lowerCamelCase : Dict = batch_size
lowerCamelCase : List[Any] = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : int = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : int = vocab_size
lowerCamelCase : List[Any] = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : str = intermediate_size
lowerCamelCase : Any = hidden_act
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Any = attention_probs_dropout_prob
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : Optional[int] = type_vocab_size
lowerCamelCase : Optional[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Dict = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Dict = scope
def a__ ( self: Tuple )-> Union[str, Any]:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : str = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : str = None
if self.use_token_type_ids:
lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : int = None
lowerCamelCase : List[Any] = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Union[str, Any] )-> int:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self: Dict , __a: List[Any] , __a: Union[str, Any] , __a: Union[str, Any] , __a: List[str] , __a: List[str] , __a: int , __a: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = NezhaModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a )
lowerCamelCase : str = model(__a , token_type_ids=__a )
lowerCamelCase : Any = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: Any , __a: Dict , __a: Tuple , __a: str , __a: List[str] , __a: List[str] , __a: Optional[Any] , __a: List[Any] , __a: Tuple , __a: Tuple , )-> Dict:
lowerCamelCase : Optional[Any] = True
lowerCamelCase : Union[str, Any] = NezhaModel(__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
lowerCamelCase : List[str] = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
lowerCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: Tuple , __a: Optional[int] , __a: List[str] , __a: Union[str, Any] , __a: Union[str, Any] , __a: str , __a: Optional[Any] , __a: Optional[int] )-> Tuple:
lowerCamelCase : Union[str, Any] = NezhaForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: str , __a: List[Any] , __a: Optional[int] , __a: List[Any] , __a: str , __a: Optional[Any] , __a: List[Any] , __a: Dict )-> Any:
lowerCamelCase : Optional[int] = NezhaForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self: Dict , __a: List[Any] , __a: int , __a: List[Any] , __a: Union[str, Any] , __a: Optional[int] , __a: List[Any] , __a: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = NezhaForPreTraining(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self: List[Any] , __a: str , __a: Any , __a: Optional[int] , __a: Union[str, Any] , __a: Tuple , __a: Dict , __a: List[str] )-> Dict:
lowerCamelCase : str = NezhaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self: Any , __a: List[Any] , __a: Optional[int] , __a: Tuple , __a: List[Any] , __a: str , __a: int , __a: Optional[int] )-> str:
lowerCamelCase : int = self.num_labels
lowerCamelCase : List[Any] = NezhaForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: Tuple , __a: Dict , __a: Dict , __a: Any , __a: str , __a: List[str] , __a: Optional[Any] , __a: Dict )-> Tuple:
lowerCamelCase : List[str] = self.num_labels
lowerCamelCase : int = NezhaForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Any , __a: List[Any] , __a: int , __a: Union[str, Any] , __a: Optional[Any] , __a: Tuple , __a: List[Any] , __a: Dict )-> int:
lowerCamelCase : Any = self.num_choices
lowerCamelCase : Union[str, Any] = NezhaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Tuple = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
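# Note on the multiple-choice reshaping above: (batch, seq_len) inputs are tiled to
# (batch, num_choices, seq_len) with unsqueeze(1).expand(...).contiguous() so every
# candidate shares the same context; the head then returns one score per choice,
# giving logits of shape (batch_size, num_choices).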
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =(
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
def a__ ( self: str , __a: Any , __a: str , __a: int=False )-> Optional[int]:
lowerCamelCase : Dict = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
lowerCamelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
lowerCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def a__ ( self: Tuple )-> Dict:
lowerCamelCase : Union[str, Any] = NezhaModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: str )-> List[str]:
self.config_tester.run_common_tests()
def a__ ( self: Union[str, Any] )-> str:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def a__ ( self: int )-> Tuple:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__a )
def a__ ( self: str )-> str:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def a__ ( self: Dict )-> str:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def a__ ( self: List[Any] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Tuple )-> Optional[int]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[int] = NezhaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : List[str] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCamelCase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase : Optional[int] = model(__a , attention_mask=__a )[0]
lowerCamelCase : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Any = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
@slow
def a__ ( self: List[str] )-> Optional[int]:
lowerCamelCase : Tuple = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCamelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase : Tuple = model(__a , attention_mask=__a )[0]
lowerCamelCase : Dict = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , __a )
lowerCamelCase : str = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
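

if __name__ == "__main__":
    # Usage sketch on the two sample graphs defined above:
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]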
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
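# Illustrative comments these patterns are meant to match (hypothetical object paths):
#   # Copied from diffusers.models.unet_2d.UNet2DModel.forward
#   # Copied from diffusers.models.attention.Attention with Attention->CrossAttention all-casing
# The optional trailing part is the `replace_pattern`: each `old->new` pair is applied to the
# copied source before comparison, and `all-casing` also applies the lower/upper-case variants.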
def get_indent(code: str) -> str:
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code: str) -> str:
    """Apply the black code style to the given code snippet."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the list of differences, or rewrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
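

# A quick sanity sketch of the feature bookkeeping above (illustrative values):
if __name__ == "__main__":
    # Two static categorical features of cardinality 10 default to embedding_dimension
    # [min(50, (10 + 1) // 2)] * 2 = [5, 5], so _number_of_features is
    # 5 + 5 + 0 (dynamic real) + 3 (time) + 0 (static real) + 1 * 2 = 15,
    # and feature_size = input_size * len(lags_sequence) + 15 = 1 * 7 + 15 = 22.
    config = TimeSeriesTransformerConfig(
        prediction_length=24,
        num_static_categorical_features=2,
        cardinality=[10, 10],
        num_time_features=3,
    )
    assert config.feature_size == 22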
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =ViTImageProcessor if is_vision_available() else None
@property
def a__ ( self: Dict )-> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self: Any )-> Dict:
lowerCamelCase : Optional[Any] = (3, 32, 128)
lowerCamelCase : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCamelCase : str = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
lowerCamelCase : Optional[int] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCamelCase : Dict = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__a , __a )
def a__ ( self: List[Any] , **__a: List[str] )-> List[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: int , **__a: List[Any] )-> Dict:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: List[str] )-> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self: Dict )-> Dict:
lowerCamelCase : str = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowerCamelCase : Dict = Image.fromarray(np.moveaxis(__a , 0 , -1 ) )
return image_input
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : Dict = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : List[str] = self.get_image_processor()
lowerCamelCase : str = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase : Optional[Any] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : int = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : List[str] = self.prepare_image_inputs()
lowerCamelCase : List[Any] = image_processor(__a , return_tensors="""np""" )
lowerCamelCase : Any = processor(images=__a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Optional[Any] = """test"""
lowerCamelCase : Union[str, Any] = processor(text=__a )
lowerCamelCase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self: str )-> Dict:
lowerCamelCase : int = self.get_image_processor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : str = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Dict = """test"""
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : List[str] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : Tuple = processor.char_decode(__a )
lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__a )
lowerCamelCase : Optional[Any] = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__a , __a )
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = self.get_image_processor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Dict = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Tuple = None
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : Tuple = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def a__ ( self: Union[str, Any] )-> str:
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : str = MgpstrProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase : Dict = torch.randn(1 , 27 , 38 )
lowerCamelCase : str = torch.randn(1 , 27 , 50_257 )
lowerCamelCase : List[Any] = torch.randn(1 , 27 , 30_522 )
lowerCamelCase : Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
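# The three tensors decoded above mirror MGP-STR's three recognition heads: a character
# head (38 symbols in this toy vocab), a BPE head (50_257, the GPT-2 vocab size) and a
# WordPiece head (30_522, the BERT vocab size); batch_decode fuses their predictions.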
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort([1, 100, 10, 1000])
    [1, 10, 100, 1000]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
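
    # Usage sketch: the sort is in-place and also returns the list.
    example = [170, 45, 75, 90, 802, 24, 2, 66]
    print(radix_sort(example))  # [2, 24, 45, 66, 75, 90, 170, 802]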
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> np.ndarray:
lowerCamelCase : Optional[int] = cva.getAffineTransform(UpperCamelCase__ , UpperCamelCase__ )
return cva.warpAffine(UpperCamelCase__ , UpperCamelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
__lowerCamelCase :List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
__lowerCamelCase :Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCamelCase , __lowerCamelCase :List[Any] = gray_img.shape
# set different points to rotate image
__lowerCamelCase :List[Any] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCamelCase :List[Any] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCamelCase :Tuple = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCamelCase :Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCamelCase :List[str] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCamelCase :Optional[int] = plt.figure(1)
__lowerCamelCase :Optional[Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
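
    # The same warp works on any array; a synthetic sketch needing no image file:
    #   square = np.zeros((200, 200), np.uint8); square[50:150, 50:150] = 255
    #   warped = get_rotation(square, pts1, pts2, 200, 200)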
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'{position_embeddings.weights[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
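# A minimal post-conversion sanity check (a sketch; the file names below are
# placeholders for whatever paths were passed to the script above). It rebuilds
# the model from the same config, reloads the dumped state dict, and runs one
# forward pass; the input length must be compatible with the configured
# attention chunking, so 64 here is only illustrative.
#
#   config = ReformerConfig.from_json_file("config.json")
#   model = ReformerModelWithLMHead(config)
#   model.load_state_dict(torch.load("pytorch_model.bin"))
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.randint(0, config.vocab_size, (1, 64))).logits
#   print(logits.shape)  # (1, 64, vocab_size)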
| 42
| 1
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet TensorFlow's C++ logging before the import below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 42
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module):
    """simple docstring"""
    def __init__( self )-> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class PreForwardHook( ModelHook):
    """simple docstring"""
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook):
    """simple docstring"""
    def post_forward( self , module , output ):
        return output + 1
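# A standalone sketch of what these hooks do (assumes only the classes above):
# attaching a pre- and a post-forward hook to a module shifts its input and its
# output by +1, so the wrapped module computes linear(x + 1) + 1.
#
#   linear = nn.Linear(3, 3)
#   x = torch.randn(2, 3)
#   expected = linear(x + 1) + 1                               # computed before hooking
#   add_hook_to_module(linear, PreForwardHook())
#   add_hook_to_module(linear, PostForwardHook(), append=True)  # chain, don't replace
#   assert torch.allclose(linear(x), expected, atol=1e-5)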
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
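# The pattern these tests exercise, in miniature (a sketch assuming one CUDA
# device; `ModelForTest` is the toy module defined above): with offload=True the
# parameters live on the "meta" device and are streamed onto the execution
# device only for the duration of each submodule's forward call.
#
#   model = ModelForTest()
#   attach_align_device_hook(model, execution_device=0, offload=True,
#                            weights_map=model.state_dict())
#   output = model(torch.randn(2, 3))     # weights materialized per submodule
#   remove_hook_from_submodules(model)    # restores the plain CPU weights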
| 42
| 1
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest( SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs )-> Union[str, Any]:
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
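# In practice this scheduler is swapped into a pipeline rather than stepped by
# hand; a sketch (model id and prompt are illustrative, and note that upstream
# diffusers exports the class as `KDPM2DiscreteScheduler`):
#
#   from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]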
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
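# The `_LazyModule` indirection keeps `import transformers` cheap: the dict at
# the top only names the submodules, and e.g. accessing `EncodecModel` is what
# actually triggers the (torch-dependent) import of `modeling_encodec`.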
| 42
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput):
    """simple docstring"""
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin):
"""simple docstring"""
@register_to_config
def __init__( self: Optional[Any] , __a: int = 16 , __a: int = 88 , __a: Optional[int] = None , __a: Optional[int] = None , __a: int = 1 , __a: float = 0.0 , __a: int = 32 , __a: Optional[int] = None , __a: bool = False , __a: Optional[int] = None , __a: str = "geglu" , __a: bool = True , __a: bool = True , )-> Dict:
super().__init__()
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : Union[str, Any] = attention_head_dim
lowerCamelCase : Optional[Any] = num_attention_heads * attention_head_dim
lowerCamelCase : str = in_channels
lowerCamelCase : Union[str, Any] = torch.nn.GroupNorm(num_groups=__a , num_channels=__a , eps=1e-6 , affine=__a )
lowerCamelCase : Any = nn.Linear(__a , __a )
# 3. Define transformers blocks
lowerCamelCase : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(
__a , __a , __a , dropout=__a , cross_attention_dim=__a , activation_fn=__a , attention_bias=__a , double_self_attention=__a , norm_elementwise_affine=__a , )
for d in range(__a )
] )
lowerCamelCase : Any = nn.Linear(__a , __a )
def a__ ( self: List[str] , __a: Union[str, Any] , __a: str=None , __a: Any=None , __a: List[Any]=None , __a: Tuple=1 , __a: int=None , __a: bool = True , )-> Dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = hidden_states.shape
lowerCamelCase : Tuple = batch_frames // num_frames
lowerCamelCase : List[str] = hidden_states
lowerCamelCase : int = hidden_states[None, :].reshape(__a , __a , __a , __a , __a )
lowerCamelCase : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase : List[Any] = self.norm(__a )
lowerCamelCase : Tuple = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __a , __a )
lowerCamelCase : Optional[int] = self.proj_in(__a )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase : Tuple = block(
__a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , class_labels=__a , )
# 3. Output
lowerCamelCase : Dict = self.proj_out(__a )
lowerCamelCase : List[str] = (
hidden_states[None, None, :]
.reshape(__a , __a , __a , __a , __a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase : Tuple = hidden_states.reshape(__a , __a , __a , __a )
lowerCamelCase : str = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__a )
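# A quick shape check for the temporal transformer above (sizes are arbitrary).
# Input is (batch * frames, channels, height, width); the block attends across
# the frame axis and returns a tensor of the same shape.
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8,
#                                    in_channels=16, norm_num_groups=8)
#   hidden = torch.randn(2 * 4, 16, 8, 8)   # batch=2, num_frames=4
#   out = model(hidden, num_frames=4).sample
#   assert out.shape == hidden.shape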
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
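# The same checkpoint outside the test harness (a sketch; the image path is a
# placeholder):
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])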
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin):
    """simple docstring"""
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 1
|
"""simple docstring"""
def speed_of_sound_in_a_fluid( density: float , bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
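# This is the Newton-Laplace relation c = sqrt(K / rho). Example (textbook
# figures for water, so only approximate): bulk modulus ~2.15e9 Pa and density
# ~998 kg/m^3 give speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
# ~= 1467 m/s.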
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig):
    """simple docstring"""
    model_type = '''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
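# Instantiating with no arguments reproduces the architecture of the
# google/realm-cc-news-pretrained-* checkpoints; a hypothetical downstream use:
#
#   config = RealmConfig(num_candidates=4)
#   scorer = RealmScorer(config)  # scores num_candidates retrieved blocks per query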
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """simple docstring"""
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , g_cost: int , parent: Node | None , )-> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self )-> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other: Node )-> bool:
        return self.f_cost < other.f_cost
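# Both heuristics are admissible on this 4-connected, unit-cost grid (Euclidean
# distance never exceeds Manhattan distance, which never exceeds the true path
# cost), so the A* searches below return cost-optimal paths.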
class AStar:
    """simple docstring"""
    def __init__( self , start: TPosition , goal: TPosition )-> None:
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self )-> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent: Node )-> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node: Node | None )-> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""
    def __init__( self , start: TPosition , goal: TPosition )-> None:
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self )-> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node: Node , bwd_node: Node )-> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__lowerCamelCase :Optional[int] = (0, 0)
__lowerCamelCase :Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase :Optional[int] = time.time()
__lowerCamelCase :Optional[int] = AStar(init, goal)
__lowerCamelCase :int = a_star.search()
__lowerCamelCase :Optional[Any] = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__lowerCamelCase :List[Any] = time.time()
__lowerCamelCase :List[str] = BidirectionalAStar(init, goal)
__lowerCamelCase :List[str] = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig( PretrainedConfig):
    """simple docstring"""
    model_type = '''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
| 42
| 1
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowerCamelCase :Dict = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key: Tuple[str] , pt_tensor: np.ndarray , random_flax_state_dict: Dict[str, jnp.ndarray] , model_prefix: str , ) -> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(key: Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
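# The two reshapes above encode the layout conventions: PyTorch stores conv
# kernels as OIHW and linear weights as (out, in), while Flax expects HWIO and
# (in, out). A shapes-only check of the conv case:
#
#   w_pt = np.zeros((8, 3, 5, 5))         # OIHW
#   w_flax = w_pt.transpose(2, 3, 1, 0)   # HWIO
#   assert w_flax.shape == (5, 5, 3, 8)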
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ):
# convert pytorch tensor to numpy
lowerCamelCase : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase : int = flax_model.params["""params"""]
else:
lowerCamelCase : Union[str, Any] = flax_model.params
lowerCamelCase : List[str] = flatten_dict(UpperCamelCase__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase : Optional[Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(UpperCamelCase__ )
lowerCamelCase : int = {}
lowerCamelCase : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase : Dict = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
lowerCamelCase : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase : int = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase , lowerCamelCase : str = rename_key_and_reshape_tensor(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# add model prefix if necessary
lowerCamelCase : str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase : List[str] = jnp.asarray(UpperCamelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase : Union[str, Any] = jnp.asarray(UpperCamelCase__ )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase : List[str] = jnp.asarray(UpperCamelCase__ )
return unflatten_dict(UpperCamelCase__ )
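# These converters are rarely called directly; the usual entry point is
# `from_pt=True` on a Flax model class (a sketch with an illustrative checkpoint):
#
#   from transformers import FlaxBertModel
#   flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)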
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ):
import torch
# Load the index
lowerCamelCase : List[Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase : Any = torch.load(UpperCamelCase__ )
lowerCamelCase : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase : str = flax_model.params["""params"""]
lowerCamelCase : List[str] = flatten_dict(UpperCamelCase__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
lowerCamelCase : Dict = flax_model.params
lowerCamelCase : List[Any] = flatten_dict(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase : Tuple = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
lowerCamelCase : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase , lowerCamelCase : int = rename_key_and_reshape_tensor(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# add model prefix if necessary
lowerCamelCase : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase : List[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase : Optional[int] = jnp.asarray(UpperCamelCase__ )
continue
if "var" in flax_key[-1]:
lowerCamelCase : Any = jnp.asarray(UpperCamelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase : Tuple = jnp.asarray(UpperCamelCase__ )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase : List[Any] = jnp.asarray(UpperCamelCase__ )
return unflatten_dict(UpperCamelCase__ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
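
A minimal usage sketch for the loader above; the model class and checkpoint path are illustrative placeholders, not values taken from this file:

# Hypothetical usage: load a Flax msgpack checkpoint into the matching
# PyTorch architecture. `BertForSequenceClassification` and the path are
# assumptions for illustration only.
from transformers import BertForSequenceClassification

pt_model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")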
"""simple docstring"""
from __future__ import annotations

import math


# for calculating u value in Newton's forward interpolation formula
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
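
For reference, a non-interactive sketch of the same method, reusing `ucal` and `math` from above; the sample points are illustrative only (f(x) = x**3 on equally spaced x):

def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    # build the forward difference table; first column holds the known y values
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y0[i]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += ucal(u, i) * table[0][i] / math.factorial(i)
    return result


# newton_forward([1, 2, 3, 4], [1, 8, 27, 64], 2.5) == 15.625, exact for a cubic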
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3_072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13_353_718, searcher_beam_size=5_000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
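
A quick sanity sketch for the config class above; it only instantiates defaults, nothing is downloaded:

# Build a default config and check a few fields set in __init__.
config = RealmConfig()
assert config.model_type == "realm"
assert config.reader_beam_size == 5 and config.searcher_beam_size == 5_000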
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two input values (0 only when both inputs are 1)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()

        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy the timm weights over, relying on identical parameter ordering
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)

        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
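
For reference, a hypothetical invocation of this conversion script; the script filename is an assumption, while the flags mirror the argparse definitions above:

# python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#     --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub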
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256,
        eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional["TensorType"] = None, sampling_rate: int = 22_050,
        time_duration: float = 5.0, frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
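
A small sketch showing the effect of the `attribute_map` defined above, using only default values:

# `hidden_size` is mapped onto `d_model`, and `num_attention_heads` onto
# `encoder_attention_heads`, so both aliases resolve to the same values.
config = WhisperConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 4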
"""simple docstring"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4),
            use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3