| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __magic_name__ ( _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None ):
return field(default_factory=lambda: default , metadata=_lowerCamelCase )
@dataclass
class SCREAMING_SNAKE_CASE__ :
_lowerCAmelCase = field(
metadata={"help": "The csv file to plot."} , )
_lowerCAmelCase = field(
default=__snake_case , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
_lowerCAmelCase = field(
default=__snake_case , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
_lowerCAmelCase = field(
default=__snake_case , metadata={"help": "Disable logarithmic scale when plotting"} , )
_lowerCAmelCase = field(
default=__snake_case , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
_lowerCAmelCase = field(
default=__snake_case , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
_lowerCAmelCase = list_field(
default=__snake_case , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def __magic_name__ ( _lowerCamelCase : Union[str, Any] ):
try:
int(_lowerCamelCase )
return True
except ValueError:
return False
def __magic_name__ ( _lowerCamelCase : Any ):
try:
float(_lowerCamelCase )
return True
except ValueError:
return False
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase ):
'''simple docstring'''
__a : Dict = args
__a : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
__a : Optional[Any] = csv.DictReader(_lowercase )
for row in reader:
__a : Union[str, Any] = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
__a : List[Any] = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
__a : List[Any] = float(row["""result"""] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Any = plt.subplots()
__a : Tuple = """Time usage""" if self.args.is_time else """Memory usage"""
__a : Union[str, Any] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__a : Any = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
__a : Optional[Any] = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
__a : List[Any] = self.result_dict[model_name]["""result"""]
((__a) , (__a)) : Any = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__a : List[Any] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__a : List[str] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowercase , )
else:
__a : List[str] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__a) , (__a)) : Any = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
__a : Dict = np.asarray(_lowercase , _lowercase )[: len(_lowercase )]
plt.scatter(
_lowercase , _lowercase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_lowercase , _lowercase , """--""" )
title_str += F''' {label_model_name} vs.'''
__a : List[Any] = title_str[:-4]
__a : Any = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(_lowercase )
plt.xlabel(_lowercase )
plt.ylabel(_lowercase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __magic_name__ ( ):
__a : Tuple = HfArgumentParser(_lowerCamelCase )
__a : List[Any] = parser.parse_args_into_dataclasses()[0]
__a : Union[str, Any] = Plot(args=_lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
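The script expects a CSV with `model`, `batch_size`, `sequence_length`, and `result` columns, as read in `Plot.__init__`. A minimal hedged sketch of driving it without the CLI (the file names here are hypothetical):

```python
# Hypothetical input file "benchmark.csv":
# model,batch_size,sequence_length,result
# bert-base-uncased,8,128,1342
args = HfArgumentParser(PlotArguments).parse_args_into_dataclasses(
    args=["--csv_file", "benchmark.csv", "--figure_png_file", "plot.png"]
)[0]
Plot(args=args).plot()  # saves plot.png instead of opening a window
```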
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowercase__ = 10
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int ):
for i in range(_lowerCamelCase , _lowerCamelCase ):
if array[i] == target:
return i
return -1
def __magic_name__ ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
__a : Optional[int] = 0
__a : Tuple = len(_lowerCamelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__a : Tuple = (left + right) // 3 + 1
__a : List[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__a : Optional[Any] = one_third - 1
elif array[two_third] < target:
__a : List[Any] = two_third + 1
else:
__a : Any = one_third + 1
__a : Union[str, Any] = two_third - 1
else:
return -1
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int ):
if left < right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__a : Any = (left + right) // 3 + 1
__a : Optional[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCamelCase , one_third - 1 , _lowerCamelCase , _lowerCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCamelCase , _lowerCamelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = input("Enter numbers separated by comma:\n").strip()
lowercase__ = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
lowercase__ = int(input("Enter the number to be found in the list:\n").strip())
lowercase__ = ite_ternary_search(collection, target)
lowercase__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'Iterative search: {target} found at positions: {resulta}')
print(f'Recursive search: {target} found at positions: {resulta}')
else:
print("Not found")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
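The final `else` branch swaps the package's module object in `sys.modules` for a `_LazyModule`, so the torch-backed submodule is only imported when one of its names is first accessed. A minimal sketch of that deferral idea (an assumption-laden illustration, not the actual `_LazyModule` implementation):

```python
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    # Resolve exported names on first attribute access instead of at import time.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```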
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
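For a quick feel of the wiring, the parser can also be exercised directly; a hedged sketch (the config path is made up):

```python
parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])  # hypothetical path
print(args.config_file)  # my_config.yaml
```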
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
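The multi-line `getattr` regex is the subtle part of the checker: the character class includes `\n`, so the pattern still matches when a call's arguments are split across lines. A small standalone demo (not part of the script itself):

```python
import re

attribute = "hidden_size"
pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'

source = '''value = getattr(
    self.config, "hidden_size", 768
)'''

# The whitespace class absorbs the line break after the opening parenthesis.
print(re.search(pattern, source) is not None)  # True
```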
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
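A hedged usage sketch of the configuration defaults above (assumes `transformers` is installed):

```python
from transformers import AlbertConfig

config = AlbertConfig()  # all defaults, as defined in __init__ above
print(config.hidden_size, config.num_attention_heads, config.layer_norm_eps)
# 4096 64 1e-12
```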
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
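The `utterance_cmvn` step is plain per-utterance standardization over the time axis, with statistics computed only on the non-padded frames. A hedged standalone sketch of the same math:

```python
import numpy as np

# Toy "features": 5 frames x 3 mel bins, where the last 2 frames are padding.
x = np.arange(15, dtype=np.float32).reshape(5, 3)
input_length = 3

mean = x[:input_length].mean(axis=0)  # per-bin mean over real frames
std = x[:input_length].std(axis=0)    # per-bin std over real frames
normalized = (x - mean) / std
normalized[input_length:] = 0.0       # restore the padding value

print(normalized[:input_length].mean(axis=0))  # ~0 per bin
```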
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
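For intuition, a quick example traced against the functions above: a root with 0 coins whose left child holds all 3 coins needs 3 moves, two to push the surplus up and one to pass a coin on to the right child.

```python
# Tree:      0
#           / \
#          3   0
tree = TreeNode(0, TreeNode(3), TreeNode(0))
print(distribute_coins(tree))  # 3
```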
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
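The special-token layout mirrors BART/RoBERTa: `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair. A hedged pure-Python illustration of `build_inputs_with_special_tokens` (the token ids are made up):

```python
cls_id, sep_id = 0, 2          # hypothetical ids for <s> and </s>
ids_a, ids_b = [10, 11], [20]  # hypothetical token ids for two segments

single = [cls_id] + ids_a + [sep_id]                           # <s> A </s>
pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>
print(single, pair)  # [0, 10, 11, 2] [0, 10, 11, 2, 2, 20, 2]
```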
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
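Since the circuit applies no gates before measuring, the qubit stays in |0⟩ and every shot should land in the same bin; a hedged expectation of the output:

```python
counts = single_qubit_measure(1, 1)
print(counts)  # expected: {'0': 1000}, as no gates are applied before measurement
```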
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
def check_bouncy(n: int) -> bool:
    """Return True if `n` is a bouncy number, i.e. neither increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
import argparse
import os
import re

import tensorflow as tf
import torch

from transformers import BertConfig, BertModel
from transformers.utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class CPUTester(unittest.TestCase):
    # Class and test names below are descriptive replacements for obfuscated placeholders.
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
a__ = '''bert-base-cased'''
a__ = '''fp16'''
a__ = '''bf16'''
a__ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> List[str]:
super().setUp()
_a : str = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def __lowercase ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_a ):
_a : Union[str, Any] = self.dist_env.copy()
_a : int = F"""{i + 1}"""
_a : Dict = strategy
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __lowercase ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_a ):
_a : Dict = self.dist_env.copy()
_a : Any = prefetch_policy
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __lowercase ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_a ):
_a : List[Any] = self.dist_env.copy()
_a : List[Any] = state_dict_type
with mockenv_context(**_a ):
_a : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = AutoModel.from_pretrained(_a )
for policy in FSDP_AUTO_WRAP_POLICY:
_a : Any = self.dist_env.copy()
_a : str = policy
if policy == "TRANSFORMER_BASED_WRAP":
_a : Optional[Any] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
_a : Optional[int] = '''2000'''
with mockenv_context(**_a ):
_a : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_a : Optional[int] = self.dist_env.copy()
_a : Union[str, Any] = '''TRANSFORMER_BASED_WRAP'''
_a : Optional[Any] = '''T5Layer'''
with mockenv_context(**_a ):
_a : Tuple = FullyShardedDataParallelPlugin()
with self.assertRaises(_a ) as cm:
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
_a : int = self.dist_env.copy()
_a : Optional[Any] = '''SIZE_BASED_WRAP'''
_a : Optional[Any] = '''0'''
with mockenv_context(**_a ):
_a : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __lowercase ( self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_a : List[str] = self.dist_env.copy()
_a : Optional[Any] = mp_dtype
with mockenv_context(**_a ):
_a : Tuple = Accelerator()
if mp_dtype == "fp16":
_a : Union[str, Any] = torch.floataa
elif mp_dtype == "bf16":
_a : str = torch.bfloataa
_a : Tuple = MixedPrecision(param_dtype=_a , reduce_dtype=_a , buffer_dtype=_a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_a )
def __lowercase ( self ) -> Optional[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_a : Union[str, Any] = self.dist_env.copy()
_a : Tuple = str(_a ).lower()
with mockenv_context(**_a ):
_a : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_a ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : List[Any] = 0.82
_a : str = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
_a : str = {
'''multi_gpu_fp16''': 3_2_0_0,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_a : Any = 1_6_0
_a : str = 1_6_0
_a : str = inspect.getfile(accelerate.test_utils )
_a : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
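# Tester holding the default SpeechT5 feature-extractor hyper-parameters; it builds
# dummy waveform inputs (source path) and log-mel inputs (target path) for the test class below.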
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
                 padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16,
                 win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10,
                 return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
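# The mixin-based test class below exercises both the waveform (source) path and the
# log-mel spectrogram (target) path of the feature extractor.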
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
__lowercase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__lowercase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test batched
__lowercase = feat_extract(snake_case_ , return_tensors='''np''' ).input_values
__lowercase = feat_extract(snake_case_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
    def test_zero_mean_unit_variance_normalization_np(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = ['''longest''', '''max_length''', '''do_not_pad''']
__lowercase = [None, 1_6_0_0, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
__lowercase = feat_extract(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors='''np''' )
__lowercase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def test_zero_mean_unit_variance_normalization(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
__lowercase = [floats_list((1, x) )[0] for x in lengths]
__lowercase = ['''longest''', '''max_length''', '''do_not_pad''']
__lowercase = [None, 1_6_0_0, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
__lowercase = feat_extract(snake_case_ , max_length=snake_case_ , padding=snake_case_ )
__lowercase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
__lowercase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
__lowercase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
__lowercase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowercase = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(audio_target=snake_case_ , padding=snake_case_ , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
__lowercase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test batched
__lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
__lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowercase = np.asarray(snake_case_ )
__lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
__lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
    def test_batch_feature_target(self):
__lowercase = self.feat_extract_tester.prepare_inputs_for_target()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case_ ) == len(snake_case_ ) for x, y in zip(snake_case_ , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case_ )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_batch_feature_target_pt(self):
__lowercase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case_ )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_padding_accepts_tensors_target(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**snake_case_ )
__lowercase = self.feat_extract_tester.prepare_inputs_for_target()
__lowercase = [len(snake_case_ ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.num_mel_bins # hack!
__lowercase = feat_extract.pad(snake_case_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , snake_case_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case_ )
    def test_attention_mask_with_truncation_target(self):
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**snake_case_ )
__lowercase = self.feat_extract_tester.prepare_inputs_for_target()
__lowercase = [len(snake_case_ ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(snake_case_ )
__lowercase = feat_extract.num_mel_bins # hack!
__lowercase = feat_extract.pad(
snake_case_ , padding='''max_length''' , max_length=snake_case_ , truncation=snake_case_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , snake_case_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
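# Usage sketch (paths assume the transformers repo root as the working directory):
#   python utils/sort_auto_mappings.py               # sort the auto mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report files that need sorting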
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
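# The fast tests below build the pipeline from tiny randomly-initialised components on CPU;
# full released checkpoints are only exercised in the @slow integration class at the end.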
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
                 intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1,
                 initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
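# Minimal sketch of instantiating a scaled-down config for quick experiments
# (the sizes below are illustrative, not the released checkpoint values):
#   config = XLMRobertaXLConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)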
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
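# Sanity-check sketch (assuming the upload step above was performed): the tiny
# checkpoint then loads like any hub model:
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")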
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf(self):
        pass
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
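# Example invocation (all three paths are placeholders for real checkpoint files):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin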
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a_ : Optional[int] = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # The expected IoU scores below are specific to the sam_vit_h checkpoint.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id that hosts the original SAM checkpoints.",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 676
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number)
# print(out)
number += 1
out += " "
return out
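# Quick sanity check (illustrative, not part of the original module): the first 15
# numbers produce the classic sequence, ending in "FizzBuzz".
def _demo_fizz_buzz():
    result = fizz_buzz(1, 15).split()
    assert result[2] == "Fizz"
    assert result[4] == "Buzz"
    assert result[14] == "FizzBuzz"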
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass

    def test_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences, )
| 286
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
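# Quick illustrative check (not part of the original module): "Uncopyrightable" has no
# repeated letters, while "allowance" repeats both "a" and "l".
def _demo_is_isogram():
    assert is_isogram("Uncopyrightable") is True
    assert is_isogram("allowance") is False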
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 286
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    """simple docstring"""

    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
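# Minimal usage sketch (illustrative, not part of the original module; the toy feature
# dict below is an assumption): aligning the template with a dataset's features swaps
# the generic Audio() column for the dataset's own Audio feature, e.g. one carrying a
# custom sampling rate.
#
#   features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   template = AutomaticSpeechRecognition()
#   aligned = template.align_with_features(features)
#   assert aligned.input_schema["audio"].sampling_rate == 16_000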
| 459
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"
    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
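# Minimal usage sketch (illustrative, not part of the original module; the sizes are
# arbitrary, not recommended settings): a GIT config with a customized vision tower.
#
#   vision_config = GitVisionConfig(hidden_size=512, num_hidden_layers=6)
#   config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=4)
#   assert config.vision_config.hidden_size == 512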
| 655
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 289
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( __lowerCAmelCase ):
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = Rectangle(height=0.5 , width=0.5 )
a__ = Rectangle(height=0.25 , width=0.25 )
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("CPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("GPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Model" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
rect.set_stroke(lowerCamelCase )
a__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase , buff=0.0 )
self.add(lowerCamelCase )
model_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase , *lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Loaded Checkpoint" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = fill.copy().set_fill(lowerCamelCase , opacity=0.7 )
target.move_to(lowerCamelCase )
ckpt_arr.append(lowerCamelCase )
a__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase , lowerCamelCase )
a__ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase )
a__ = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Disk" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) , Write(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) )
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(FadeOut(lowerCamelCase ) )
a__ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase , lowerCamelCase , *lowerCamelCase , *lowerCamelCase ) , )
self.wait()
| 289
| 1
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece_ignore_case = True
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<s>''')
        self.assertEqual(vocab_keys[1], '''<pad>''')
        self.assertEqual(vocab_keys[-1], '''<mask>''')
        self.assertEqual(len(vocab_keys), 101_122)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='''pt''')
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        expected_encoding = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''moussaKam/mbarthez''', revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''', sequences=sequences, )
| 24
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
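# Shape sketch (illustrative, not part of the original script): timm fuses q, k and v
# into one projection of shape (3 * hidden_size, hidden_size); the slices above carve
# it into three (hidden_size, hidden_size) blocks in q, k, v order. For hidden_size = 4:
#
#   in_proj_weight = torch.arange(48, dtype=torch.float32).reshape(12, 4)
#   q, k, v = in_proj_weight[:4, :], in_proj_weight[4:8, :], in_proj_weight[-4:, :]
#   assert q.shape == k.shape == v.shape == (4, 4)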
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 426
| 0
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 720
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
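# Quick illustrative call (not part of the original module): with force set to 0, the
# function solves for it from the plate area (in m^2) and separation (in m).
def _demo_casimir_force():
    result = casimir_force(force=0, area=4, distance=0.03)
    assert set(result) == {"force"}
    assert result["force"] > 0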
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
    '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json''',
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = """fnet"""

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
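# Minimal usage sketch (illustrative, not part of the original module; the sizes are
# arbitrary, only meant to show the constructor signature):
#
#   config = FNetConfig(vocab_size=1_000, hidden_size=128, num_hidden_layers=2)
#   assert config.model_type == "fnet"
#   assert config.hidden_size == 128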
| 1
|
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
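# Worked example: for n = 10 the sum of 1..10 is 55 and the sum of squares is 385,
# so the difference is 55**2 - 385 = 3025 - 385 = 2640.
def _demo_solution():
    assert solution(10) == 2640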
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 720
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
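# Quick illustrative usage (not part of the original module): build the tree from an
# array, then take prefix sums, range queries and a point update.
def _demo_fenwick_tree():
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    assert tree.query(1, 4) == 2 + 3 + 4
    tree.add(0, 9)  # the underlying array becomes [10, 2, 3, 4, 5]
    assert tree.get(0) == 10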
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def _A (UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] ) ->Any:
'''simple docstring'''
lowerCamelCase__ : List[Any] = b.T
lowerCamelCase__ : List[Any] = np.sum(np.square(UpperCamelCase ) , axis=1 )
lowerCamelCase__ : Dict = np.sum(np.square(UpperCamelCase ) , axis=0 )
lowerCamelCase__ : Tuple = np.matmul(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : List[str] = aa[:, None] - 2 * ab + ba[None, :]
return d
def _A (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ) ->str:
'''simple docstring'''
lowerCamelCase__ : List[str] = x.reshape(-1 , 3 )
lowerCamelCase__ : Tuple = squared_euclidean_distance(UpperCamelCase , UpperCamelCase )
return np.argmin(UpperCamelCase , axis=1 )
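
# A quick sanity check for the helpers above; the pixel and cluster values
# are toy numbers, not a real palette.
def _color_quantize_example():
    pixels = np.array([[0.0, 0.0, 0.0], [250.0, 250.0, 250.0]])
    clusters = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])
    # The dark pixel maps to cluster 0, the bright one to cluster 1.
    assert color_quantize(pixels, clusters).tolist() == [0, 1]
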
class ImageGPTImageProcessor(BaseImageProcessor):
    """Resizes images to a smaller resolution and (optionally) color-quantizes them
    against a fixed palette of cluster centers, producing flat `input_ids` sequences."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None):
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
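
# A minimal usage sketch for the processor above. The 4x4 target size and the
# random 16-color palette are hypothetical; real checkpoints ship their own
# clusters, which live in [-1, 1] to match the normalized pixel range.
def _image_processor_example():
    rng = np.random.default_rng(0)
    clusters = rng.uniform(-1, 1, size=(16, 3))
    processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 4, "width": 4})
    image = rng.integers(0, 256, size=(8, 8, 3)).astype(np.uint8)
    encoding = processor.preprocess(image)
    # Each image becomes a flat sequence of 4 * 4 = 16 cluster indices.
    assert len(encoding["input_ids"][0]) == 16
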
| 157
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate the legacy, negated `no_*` arguments into their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
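
# A minimal usage sketch (illustrative values; `models` and `cuda` are fields
# inherited from BenchmarkArguments): the legacy `no_*` flags are folded into
# their positive counterparts by __init__ above.
def _benchmark_args_example():
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
    # `no_cuda=True` was translated to `cuda=False`, so devices resolve to CPU.
    assert args.cuda is False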
| 719
|
'''simple docstring'''
import numpy
# List of input, output pairs
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for an example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for an example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms; index == -1 corresponds to the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 466
| 0
|
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 301
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 301
| 1
|
def multiplicative_persistence(num: int) -> int:
    """Number of times num's digits must be multiplied together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Number of times num's digits must be summed together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
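
# Worked examples for the two functions above:
# 217 -> 2*1*7 = 14 -> 1*4 = 4, so its multiplicative persistence is 2;
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, so its additive persistence is 3.
def _persistence_examples():
    assert multiplicative_persistence(217) == 2
    assert additive_persistence(199) == 3
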
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
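
# The module above follows the Transformers lazy-import pattern: submodule
# contents are declared in _import_structure and only imported on first
# attribute access. A stripped-down, self-contained sketch of that idea
# (not the real _LazyModule implementation) for reference:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Minimal illustration of attribute-triggered imports."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # Import the owning submodule lazily, then pull the attribute from it.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)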
| 89
| 1
|
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
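
# A minimal usage sketch for patch_submodule above. `shutil` and the
# replacement function are stand-ins; any module that did `import os`
# works the same way.
def _patch_submodule_example():
    import shutil  # stdlib module whose globals contain `os`

    def fake_join(*parts):
        return "::".join(parts)

    with patch_submodule(shutil, "os.path.join", fake_join):
        # Inside the context, shutil.os.path.join is the replacement...
        assert shutil.os.path.join("a", "b") == "a::b"
    # ...and the original `os` module object is restored on exit.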
| 78
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(input_image, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 354
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(_lowerCamelCase , "Please use tf.one_hot on tensors.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str]) -> str:
'''simple docstring'''
__UpperCamelCase : str = labels_dense.shape[0]
__UpperCamelCase : str = numpy.arange(_lowerCamelCase) * num_classes
__UpperCamelCase : str = numpy.zeros((num_labels, num_classes))
__UpperCamelCase : Tuple = 1
return labels_one_hot
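
# A quick illustration of _dense_to_one_hot with toy labels: labels [1, 0]
# and 3 classes give rows [0, 1, 0] and [1, 0, 0].
def _one_hot_example():
    labels = numpy.array([1, 0])
    one_hot = _dense_to_one_hot(labels, 3)
    assert one_hot.tolist() == [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]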
@deprecated(_lowerCamelCase , "Please use tf.data to implement this functionality.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Optional[int]=10) -> Dict:
'''simple docstring'''
print("Extracting" , f.name)
with gzip.GzipFile(fileobj=_lowerCamelCase) as bytestream:
__UpperCamelCase : int = _readaa(_lowerCamelCase)
if magic != 2_049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
__UpperCamelCase : Any = _readaa(_lowerCamelCase)
__UpperCamelCase : List[Any] = bytestream.read(_lowerCamelCase)
__UpperCamelCase : Dict = numpy.frombuffer(_lowerCamelCase , dtype=numpy.uinta)
if one_hot:
return _dense_to_one_hot(_lowerCamelCase , _lowerCamelCase)
return labels
class _DataSet:
    """Container class for an MNIST data split (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. `seed` allows deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCamelCase , "Please write your own downloading logic.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]) -> Tuple:
'''simple docstring'''
if not gfile.Exists(_lowerCamelCase):
gfile.MakeDirs(_lowerCamelCase)
__UpperCamelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
if not gfile.Exists(_lowerCamelCase):
urllib.request.urlretrieve(_lowerCamelCase , _lowerCamelCase) # noqa: S310
with gfile.GFile(_lowerCamelCase) as f:
__UpperCamelCase : Any = f.size()
print("Successfully downloaded" , _lowerCamelCase , _lowerCamelCase , "bytes.")
return filepath
@deprecated(
_lowerCamelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : List[Any]=False , _lowerCamelCase : str=False , _lowerCamelCase : List[str]=dtypes.floataa , _lowerCamelCase : Any=True , _lowerCamelCase : Union[str, Any]=5_000 , _lowerCamelCase : str=None , _lowerCamelCase : Optional[int]=DEFAULT_SOURCE_URL , ) -> List[Any]:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCamelCase , one_hot=_lowerCamelCase , dtype=_lowerCamelCase , seed=_lowerCamelCase)
__UpperCamelCase : Optional[int] = fake()
__UpperCamelCase : Tuple = fake()
__UpperCamelCase : List[str] = fake()
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase)
if not source_url: # empty string check
__UpperCamelCase : str = DEFAULT_SOURCE_URL
__UpperCamelCase : Optional[int] = "train-images-idx3-ubyte.gz"
__UpperCamelCase : Dict = "train-labels-idx1-ubyte.gz"
__UpperCamelCase : List[str] = "t10k-images-idx3-ubyte.gz"
__UpperCamelCase : List[str] = "t10k-labels-idx1-ubyte.gz"
__UpperCamelCase : Optional[int] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_images_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : int = _extract_images(_lowerCamelCase)
__UpperCamelCase : Optional[Any] = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + train_labels_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : int = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase)
__UpperCamelCase : int = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_images_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : Optional[int] = _extract_images(_lowerCamelCase)
__UpperCamelCase : str = _maybe_download(
_lowerCamelCase , _lowerCamelCase , source_url + test_labels_file)
with gfile.Open(_lowerCamelCase , "rb") as f:
__UpperCamelCase : List[str] = _extract_labels(_lowerCamelCase , one_hot=_lowerCamelCase)
if not 0 <= validation_size <= len(_lowerCamelCase):
__UpperCamelCase : str = (
"Validation size should be between 0 and "
F'{len(_lowerCamelCase)}. Received: {validation_size}.'
)
raise ValueError(_lowerCamelCase)
__UpperCamelCase : Any = train_images[:validation_size]
__UpperCamelCase : Optional[Any] = train_labels[:validation_size]
__UpperCamelCase : Optional[int] = train_images[validation_size:]
__UpperCamelCase : Tuple = train_labels[validation_size:]
__UpperCamelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
__UpperCamelCase : Union[str, Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
__UpperCamelCase : str = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
__UpperCamelCase : Optional[Any] = _DataSet(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
return _Datasets(train=_lowerCamelCase , validation=_lowerCamelCase , test=_lowerCamelCase)
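
# A minimal usage sketch for read_data_sets above. The directory is a
# stand-in; the four MNIST archives are downloaded there on first use.
def _mnist_example():
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = mnist.train.next_batch(32)
    # Images are flattened to 784 floats in [0, 1]; labels are one-hot over 10 classes.
    assert images.shape == (32, 784) and labels.shape == (32, 10)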
| 94
| 1
|
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase : Union[str, Any] = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase,  # the long dict literal above keeps its original name
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 66
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 142
| 0
|
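# Illustrative usage sketch for the Funnel lazy-import module above (assumes
# the package is installed as `transformers` and ships the Funnel model): with
# the `_import_structure`/`_LazyModule` pattern, importing a symbol only
# materializes its submodule on first attribute access.
from transformers import FunnelConfig  # resolved lazily via _LazyModule

config = FunnelConfig()  # first access triggers the real import of configuration_funnel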
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 440
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 440
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 79
|
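# Illustrative sketch of the feature arithmetic in the Informer config above:
# with the defaults (input_size=1, lags [1..7], no static features) and
# num_time_features=2, `_number_of_features` is 0 + 0 + 2 + 0 + 1*2 = 4, so
# feature_size = 1 * 7 + 4 = 11. Assumes `InformerConfig` is importable from
# `transformers`.
from transformers import InformerConfig

cfg = InformerConfig(prediction_length=24, num_time_features=2)
assert cfg.feature_size == cfg.input_size * len(cfg.lags_sequence) + cfg._number_of_features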
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 79
| 1
|
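# Illustrative usage sketch for the zero-shot classification tool above. The
# calling convention follows the PipelineTool API (__call__ runs setup, encode,
# forward, decode); the positional argument order is an assumption, and a
# network connection is needed to fetch facebook/bart-large-mnli.
classifier = TextClassificationTool()
label = classifier("This movie was great!", ["positive", "negative"])
print(label)  # expected: "positive"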
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_A = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_A = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_A = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_A = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_A = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_A = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_A = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_A = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_A = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_A = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_A = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_A = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_A = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_A = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_A = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 702
|
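# Self-contained sketch of the results-key normalization used in the assert
# above: a Hub model id "org/name-with-dashes" becomes "org_name_with_dashes".
# The model id below is hypothetical, for illustration only.
model_id = "google/ddpm-cifar10-32"
key = "_".join("_".join(model_id.split("/")).split("-"))
assert key == "google_ddpm_cifar10_32"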
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 294
| 0
|
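# Sketch of what `pad_across_processes` does in the checks above: with 2
# processes, rank 0 holds shape (2, 10) and rank 1 holds (3, 10); both are
# padded on dim 0 to the global max (3), zero-filled at the end (or at the
# front when pad_first=True). A single-process equivalent of the zero padding:
import torch

t = torch.ones(2, 10)
padded = torch.cat([t, torch.zeros(1, 10)])  # what rank 0 would receive
assert padded.shape == (3, 10) and bool(torch.all(padded[2:] == 0))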
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 103
|
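# Sketch of the id layout built by the Pegasus tokenizer above: ids 0/1 are
# pad/eos, the next low ids cover the mask and <unk_x> placeholder tokens, and
# every SentencePiece piece id is shifted up by `offset` (103 by default).
offset = 103
sp_piece_id = 7                   # hypothetical SentencePiece id, for illustration
vocab_id = sp_piece_id + offset   # what _convert_token_to_id returns
assert vocab_id - offset == sp_piece_id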
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 341
| 0
|
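# Minimal standalone sampling sketch mirroring the scheduler test loop above
# (illustrative; requires `torchsde`, and a random tensor stands in for a
# trained denoiser):
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02,
    beta_schedule="linear", noise_sampler_seed=0,
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(model_input)  # stand-in for the model
    sample = scheduler.step(model_output, t, sample).prev_sample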
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 703
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
A__: List[str] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[int]=None ):
"""simple docstring"""
if test_case is None:
return partial(_lowerCAmelCase , version=_lowerCAmelCase )
return unittest.skipUnless(is_torch_version(">=" , _lowerCAmelCase ) , F'test requires torch version >= {version}' )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_lowerCAmelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCAmelCase )
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = True
@classmethod
def lowerCamelCase_ ( cls : Any ):
_lowerCamelCase : List[str] = tempfile.mkdtemp()
@classmethod
def lowerCamelCase_ ( cls : Tuple ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCamelCase_ ( self : Optional[int] ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__A )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int,__A : Union[mock.Mock, List[mock.Mock]] ):
_lowerCamelCase : Tuple = mocks if isinstance(__A,(tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = AcceleratorState()
_lowerCamelCase : str = tensor[None].clone().to(state.device )
_lowerCamelCase : List[Any] = gather(_lowerCAmelCase ).cpu()
_lowerCamelCase : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _lowerCAmelCase ):
return False
return True
class UpperCAmelCase__ :
def __init__( self : int,__A : Any,__A : List[Any],__A : str ):
_lowerCamelCase : Tuple = returncode
_lowerCamelCase : List[str] = stdout
_lowerCamelCase : Any = stderr
async def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : int ):
"""simple docstring"""
while True:
_lowerCamelCase : Optional[Any] = await stream.readline()
if line:
callback(_lowerCAmelCase )
else:
break
async def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(_lowerCAmelCase ) )
_lowerCamelCase : List[str] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_lowerCamelCase : List[Any] = []
_lowerCamelCase : List[str] = []
def tee(_lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int="" ):
_lowerCamelCase : Optional[Any] = line.decode("utf-8" ).rstrip()
sink.append(_lowerCAmelCase )
if not quiet:
print(_lowerCAmelCase , _lowerCAmelCase , file=_lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stderr , label="stderr:" ) ) ),
] , timeout=_lowerCAmelCase , )
return _RunOutput(await p.wait() , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]=180 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=True ):
"""simple docstring"""
_lowerCamelCase : Dict = asyncio.get_event_loop()
_lowerCamelCase : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCAmelCase , env=_lowerCAmelCase , stdin=_lowerCAmelCase , timeout=_lowerCAmelCase , quiet=_lowerCAmelCase , echo=_lowerCAmelCase ) )
_lowerCamelCase : List[str] = " ".join(_lowerCAmelCase )
if result.returncode > 0:
_lowerCamelCase : int = "\n".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class UpperCAmelCase__ ( A ):
pass
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple=False ):
"""simple docstring"""
try:
_lowerCamelCase : Optional[Any] = subprocess.check_output(_lowerCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_lowerCAmelCase , "decode" ):
_lowerCamelCase : List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'Command `{" ".join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 44
|
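# Illustrative usage sketch for the subprocess helpers above. The final helper
# is `run_command` in upstream accelerate.test_utils (its name is mangled in
# this row; the restored name is an assumption here):
stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
# -> "ok\n"; a non-zero exit raises SubprocessCallException with the captured output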
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 298
| 0
|
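# Self-contained sketch of the key rename performed by the DialoGPT converter
# above, using a toy state dict:
import torch

state = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.h.0.attn.bias": torch.ones(1)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state and "lm_head.weight" in state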
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 700
|
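# Illustrative sketch of what the `inputs` property above returns for the
# default task: dynamic batch and sequence axes for three input tensors.
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]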
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 639
| 0
|
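# Illustrative usage sketch for the QA metrics in the row above (assuming the
# helpers normalize_answer, f1_score and calculate_exact_match are in scope):
assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
# pred tokens {quick, brown, fox} vs gt tokens {quick, fox}: precision 2/3,
# recall 1.0, so F1 = 0.8
assert abs(f1_score("the quick brown fox", "a quick fox") - 0.8) < 1e-9
assert calculate_exact_match(["Paris"], ["paris."]) == {"em": 1.0}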
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Union[str, Any]:
if s == d:
return []
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Union[str, Any] = []
if s == -2:
UpperCamelCase :Tuple = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Any = ss
# check if se have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-1 ) -> Optional[int]:
if c == -1:
UpperCamelCase :List[str] = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase :str = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Tuple:
UpperCamelCase :Union[str, Any] = deque()
UpperCamelCase :Optional[Any] = []
if s == -2:
UpperCamelCase :Optional[int] = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase :List[str] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Optional[Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
return len(self.graph[u] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Any:
UpperCamelCase :str = []
UpperCamelCase :Optional[Any] = []
if s == -2:
UpperCamelCase :Dict = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = s
UpperCamelCase :List[str] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Any = ss
# check if se have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return sorted_nodes
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :int = []
UpperCamelCase :int = []
UpperCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = -2
UpperCamelCase :List[str] = []
UpperCamelCase :Optional[Any] = s
UpperCamelCase :Optional[Any] = False
UpperCamelCase :Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :List[str] = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[str] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Union[str, Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = s
UpperCamelCase :List[Any] = ss
# check if se have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :int = []
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = -2
UpperCamelCase :Any = []
UpperCamelCase :List[str] = s
UpperCamelCase :Dict = False
UpperCamelCase :List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Dict = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Any = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Optional[Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
UpperCamelCase :Optional[Any] = ss
# check if se have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> int:
UpperCamelCase :int = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = time()
return end - begin
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Dict:
UpperCamelCase :Optional[int] = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = time()
return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 for a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
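# A small smoke-test sketch for the two graph classes above; vertex labels and
# edges are illustrative only.
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    print(dg.dfs())  # [0, 1, 2]
    print(dg.has_cycle())  # True
    ug = Graph()
    ug.add_pair("a", "b")
    print(ug.bfs("a"))  # ['a', 'b']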
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH (Mathematics Aptitude Test of Heuristics) dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
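# Usage sketch, mirroring the doctest in _KWARGS_DESCRIPTION above (requires
# the math_equivalence dependency to be installed):
if __name__ == "__main__":
    metric = datasets.load_metric("competition_math")
    results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    print(results)  # {'accuracy': 1.0}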
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
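# A hedged invocation sketch for the script above; the flags are those defined
# in main(), while the script name and the paths are hypothetical:
#
#   python this_script.py --num_epochs 2 --output_dir ./ckpts
#   python this_script.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0
#
# On resume, training_function() reloads the accelerator state, re-runs the
# evaluation loop, and asserts that accuracy, scheduler lr, optimizer lr and
# epoch all match what was serialized to state_<epoch>.json.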
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
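# A hedged usage sketch; `image_processor`, `feature_extractor`, `video_frames`
# and `waveform` are hypothetical objects, and the keyword names follow the
# __call__ signature above:
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#
# The audio features, image features and optional mixed-image features are all
# merged into one dict of model inputs.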
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 system of linear equations; each equation is [a, b, c] for ax + by = c."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
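# Worked example: solve 2x + 3y = 8 and x - y = -1; each equation is passed as
# [a, b, c] for ax + by = c.
if __name__ == "__main__":
    # determinant = 2 * (-1) - 1 * 3 = -5, determinant_x = 8 * (-1) - (-1) * 3 = -5,
    # determinant_y = 2 * (-1) - 1 * 8 = -10, hence x = 1.0 and y = 2.0.
    print(cramers_rule_2x2([2, 3, 8], [1, -1, -1]))  # (1.0, 2.0)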
"""simple docstring"""
def _lowerCAmelCase ( __lowerCamelCase:Any ):
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = 2
while i * i <= n:
__magic_name__ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _lowerCAmelCase ( ):
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = 1
while True:
i += 1
t_num += i
if count_divisors(__lowerCamelCase ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
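# Quick sanity check of the divisor-counting helper: 28 = 2 ** 2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors:
#
#   >>> count_divisors(28)
#   6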
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **generate_kwargs).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
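# What the shift_tokens_right test above pins down, as a sketch (values taken
# from the test itself): the sequence moves one slot to the right, index 0
# becomes the decoder start token (2), and one pad token (1) drops out:
#
#   >>> shift_tokens_right(np.array([[71, 82, 18, 33, 2, 1, 1]]), 1, 2)
#   array([[ 2, 71, 82, 18, 33,  2,  1]])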
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowercase = DatasetInfosDict.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = str(__SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(__SCREAMING_SNAKE_CASE )
lowercase = DatasetInfo.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , 'dataset_info.json' ) )
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
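
# Quick check (a sketch, not part of the original module): the first term is the
# string "1" and every later term is the fraction 1/k.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]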
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as its first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
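
# Minimal usage sketch (not part of the original module): shows chained add_edge
# calls and the adjacency lists produced for directed vs. undirected graphs.
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList[int]()
    directed_graph.add_edge(1, 2).add_edge(2, 3)
    print(directed_graph)  # {1: [2], 2: [3], 3: []}

    undirected_graph = GraphAdjacencyList[int](directed=False)
    undirected_graph.add_edge(1, 2)
    print(undirected_graph)  # {1: [2], 2: [1]}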
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
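
# Illustrative call-order sketch (not part of the original module). Names such as
# `bnb_config` and `weights` are hypothetical; this only shows how the helpers above
# are meant to be combined, assuming bitsandbytes and a CUDA device are available:
#
#     keep_unquantized = get_keys_to_not_convert(model)
#     model = replace_with_bnb_linear(model, modules_to_not_convert=keep_unquantized, quantization_config=bnb_config)
#     for name, tensor in weights.items():
#         set_module_quantized_tensor_to_device(model, name, device=0, value=tensor)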
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
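
# Note (a sketch, not part of the original metric): for n <= 10 rows every index
# falls inside its own top-10 neighbours, so the score is trivially 1.0 and the
# metric only discriminates for larger batches, e.g.:
#     precision_at_10(np.eye(3), np.eye(3))  # -> 1.0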
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(A__ , A__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
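
# Quick sanity check (a sketch, not part of the original script): compare the
# segmented sieve against a naive primality test on a small range.
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))

assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]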
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
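
# Minimal usage sketch (not part of the original file): with the default conv
# strides (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw audio by
# a factor of 5 * 2**6 = 320 samples per output frame.
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # 320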
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
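
# Usage sketch (not part of the original file; assumes network access to the Hub):
# XLNet appends its special tokens at the end of the sequence rather than the start, e.g.
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     tokenizer.build_inputs_with_special_tokens([10, 11])
#     # -> [10, 11, sep_token_id, cls_token_id]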
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if inductance < 0:
raise ValueError("Inductance cannot be negative")
if frequency < 0:
raise ValueError("Frequency cannot be negative")
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative")
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
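
    # Worked examples (a sketch, not part of the original module): solve for the
    # missing quantity by passing 0 for exactly one argument.
    print(ind_reactance(35e-3, 1e3, 0))  # {'reactance': 219.911...}
    print(ind_reactance(0, 10e3, 50))    # {'inductance': 0.000795774...}
    print(ind_reactance(35e-3, 0, 50))   # {'frequency': 227.364...}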
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """simple docstring"""

    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    """simple docstring"""

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """simple docstring"""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """simple docstring"""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
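# Example invocation (a sketch; the script filename is an assumption, not taken
# from this file):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection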
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
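# Worked example (Project Euler 16): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) == 26; the default solution(1000) yields the published answer 1366.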
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A menu rendered in the terminal that lets the user pick one choice with the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
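# Minimal usage sketch (names follow accelerate's menu helper, so the exact public
# API may differ): render a three-item menu and block until a selection is made.
#
# menu = BulletMenu("Pick a device:", ["cpu", "cuda", "mps"])
# selected = menu.run(default_choice=0)  # returns the chosen index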
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
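# Usage sketch (assumes the DeeBERT research setup; exact entry points may differ):
#
# config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
# model = DeeRobertaForSequenceClassification(config)
# outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
# loss, logits = outputs[0], outputs[1]  # highway losses fold into `loss` when train_highway=True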
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
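# Example (a sketch): distill a 12-layer BART teacher into a student with a
# 3-layer decoder; per LAYERS_TO_COPY, decoder layers [0, 6, 11] are copied.
#
# student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#     "facebook/bart-large-cnn", "student_dir", e=12, d=3
# )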
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter implemented in direct form."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
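# Usage sketch: stream samples through a 2nd-order filter one at a time. The
# coefficient values below are illustrative placeholders, not a designed filter.
#
# filt = IIRFilter(2)
# filt.set_coefficients([1.0, -1.1, 0.3], [0.1, 0.2, 0.1])
# filtered = [filt.process(s) for s in samples]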
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
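# To run only this module (assuming the usual transformers test layout):
#   python -m pytest tests/models/flaubert/test_modeling_flaubert.py -q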
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
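# Example (a sketch; the script filename is an assumption): write a randomly
# initialized t5-small-shaped checkpoint, plus its tokenizer, to ./t5_random:
#   python save_randomly_initialized.py t5-small ./t5_random
# Extra config overrides are forwarded as keyword arguments via fire.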
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
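# Usage sketch: a 4x super-resolution config; unspecified fields keep the
# defaults above. Note that num_hidden_layers resolves to num_layers through
# attribute_map, so it equals len(config.depths).
#
# config = Swin2SRConfig(upscale=4)
# assert config.num_hidden_layers == len(config.depths)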
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
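# Examples: twin_prime(5) returns 7 (5 and 7 are twin primes), while twin_prime(7)
# returns -1 because 9 = 7 + 2 is not prime.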
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
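# Round-trip example: each letter maps to a 5-symbol A/B group, so
# encode("ab") == "AAAAAAAAAB" and decode("AAAAA AAAAB") == "a b".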
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
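# Usage sketch (normally reached via load_dataset("text", data_files=...) rather
# than instantiated directly):
#
# reader = TextDatasetReader("data.txt", split=NamedSplit("train"))
# ds = reader.read()  # a Dataset with one "text" column, one example per line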
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
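# Intuition check: with drop_prob=0.2 during training, each sample's residual
# branch is zeroed with probability 0.2 and the survivors are scaled by 1/0.8,
# leaving the branch's expected value unchanged:
#
# x = torch.ones(4, 3, 8, 8)
# y = drop_path(x, drop_prob=0.2, training=True)  # per-sample: all zeros or all 1.25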
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input is a tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '\n    PoolFormer Model transformer with an image classification head on top\n    ',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        labels=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
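A minimal inference sketch for the classification model above; the `sail/poolformer_s12` checkpoint name and the image path are illustrative assumptions, not something this file pins down:

import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained('sail/poolformer_s12')  # assumed checkpoint
model = PoolFormerForImageClassification.from_pretrained('sail/poolformer_s12')
inputs = processor(images=Image.open('cat.png'), return_tensors='pt')  # placeholder image path
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])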
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bss'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0, 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'), inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'), env_info_csv_file=os.path.join(tmp_dir, 'env.csv'), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, 'log.txt'), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
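For reference, a minimal sketch of driving the same benchmark outside `unittest`, using only arguments already exercised by the tests above (sequence lengths and batch sizes here are arbitrary):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=['sshleifer/tiny-gpt2'], training=False, inference=True, sequence_lengths=[8, 32], batch_sizes=[1, 2], multi_process=False
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)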
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
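A minimal migration sketch: the deprecated class is a thin alias, so only the import should change. The checkpoint name is an assumption for illustration:

from transformers import PoolFormerImageProcessor

# preferred replacement for PoolFormerFeatureExtractor
image_processor = PoolFormerImageProcessor.from_pretrained('sail/poolformer_s12')  # assumed checkpoint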
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                """ `placeholder_token` that is not already in the tokenizer."""
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent"""
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, """ """.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
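A minimal usage sketch of the tokenizer above (class and method names as reconstructed here; the CLIP checkpoint name is an assumption):

tokenizer = MultiTokenCLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14')  # assumed checkpoint
tokenizer.add_placeholder_tokens('<cat-toy>', num_vec_per_token=4)
print(tokenizer.token_map['<cat-toy>'])  # ['<cat-toy>_0', '<cat-toy>_1', '<cat-toy>_2', '<cat-toy>_3']
# the placeholder is expanded to its sub-tokens before the normal CLIP encoding runs
ids = tokenizer.encode('a photo of <cat-toy>', vector_shuffle=True)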
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with
    the other models in the library keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
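A minimal sketch of instantiating the backbone directly from a config. It assumes `timm` is installed, that `TimmBackbone`/`TimmBackboneConfig` are exported at the transformers top level (true in recent releases), and uses `resnet18` as an arbitrary timm model name:

import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(-1,))
model = TimmBackbone(config)
outputs = model(torch.randn(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])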
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""",
        )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        output = image_classifier(image, candidate_labels=["""a""", """b""", """c"""])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", framework="""tf"""
        )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        output = image_classifier(image, candidate_labels=["""a""", """b""", """c"""])
        self.assertEqual(
            nested_simplify(output),
            [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
        )
        output = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                    {"""score""": 0.333, """label""": ANY(str)},
                ],
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        output = image_classifier(image, candidate_labels=["""cat""", """plane""", """remote"""])
        self.assertEqual(
            nested_simplify(output),
            [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", framework="""tf"""
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        output = image_classifier(image, candidate_labels=["""cat""", """plane""", """remote"""])
        self.assertEqual(
            nested_simplify(output),
            [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5,
        )
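Outside the test harness, the same pipeline boils down to a few lines; the image path is a placeholder:

from transformers import pipeline

classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
preds = classifier('path/to/image.png', candidate_labels=['cat', 'plane', 'remote'])  # placeholder path
print(preds[0]['label'], preds[0]['score'])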
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    '''Check if any name in `attributes` is used in one of the strings in `source_strings`.'''
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
            elif (
                re.search(
                    Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''', modeling_source, )
                is not None
            ):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__lowerCamelCase = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
    case_allowed = True
if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("""_token_id"""):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    '''Check the arguments in `__init__` of `config_class` against the modeling files of the corresponding model.'''
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["""self""", """kwargs"""]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("""modeling_""")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    '''Check the arguments of the `__init__` of all configuration classes.'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
        for name, attributes in configs_with_unused_attributes.items():
            error += f'''{name}: {attributes}\n'''
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
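A quick, self-contained illustration of the two lookup patterns the checker recognizes; `hidden_size` here is just a hypothetical attribute name:

_demo_sources = ['self.dense = nn.Linear(config.hidden_size, 4)', 'size = getattr(config, "hidden_size", 64)']
# the first string contains `config.hidden_size`, the second `getattr(config, "hidden_size"`,
# so either one alone is enough for the attribute to count as used
assert check_attribute_being_used(PretrainedConfig, ['hidden_size'], None, _demo_sources)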
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Principal Component Analysis: project the dataset onto its top principal directions"""
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("""Principal Component Analysis computed""")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
        logging.error("""Dataset empty""")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Linear Discriminant Analysis: project the dataset onto the most discriminative directions"""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes), covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("""Linear Discriminant Analysis computed""")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
        logging.error("""Dataset empty""")
        raise AssertionError
def snake_case ( ):
'''simple docstring'''
__lowercase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__lowercase = np.array([0, 0, 0, 1, 1] )
__lowercase = 2
__lowercase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowerCamelCase ) as error_info:
__lowercase = linear_discriminant_analysis(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def snake_case ( ):
'''simple docstring'''
__lowercase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__lowercase = 2
__lowercase = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowerCamelCase ) as error_info:
__lowercase = principal_component_analysis(lowerCamelCase , lowerCamelCase )
if not np.allclose(lowerCamelCase , lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
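# --- Usage sketch (added for illustration; not part of the original module) ---
# Project a toy dataset with 3 features and 4 samples onto its 2 principal
# components; `principal_component_analysis` returns shape (dimensions, n_samples).
#
#     features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
#     projected = principal_component_analysis(features, dimensions=2)
#     assert projected.shape == (2, features.shape[1])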
| 53
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a Gabor kernel of size `ksize` for the given orientation and wavelength."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
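# --- Quick sanity check (illustrative addition) ---
# An even `ksize` is bumped to the next odd value, so requesting a 10x10 kernel
# actually yields an 11x11 one:
#     kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#     assert kernel.shape == (11, 11)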
| 53
| 1
|
"""simple docstring"""
def lowercase ( __UpperCamelCase ) -> int:
__magic_name__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowercase ( __UpperCamelCase ) -> int:
__magic_name__ = 0
while number > 0:
__magic_name__ = number % 10
sum_of_digits += last_digit
__magic_name__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowercase ( __UpperCamelCase = 100 ) -> int:
__magic_name__ = factorial(__UpperCamelCase )
__magic_name__ = split_and_add(__UpperCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
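# Worked example (illustrative): 10! = 3628800 and 3+6+2+8+8+0+0 = 27, so solution(10) == 27.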
| 490
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
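# Behavior note (illustrative sketch, not from the original file): instantiating the shim
# emits a FutureWarning and otherwise behaves exactly like VideoMAEImageProcessor.
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         _ = VideoMAEFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)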
| 490
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50",
        batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features,
            out_indices=self.out_indices, stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 200
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
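# Worked example (illustrative): with the default scale_factor of 8, a 768x768 request
# maps to a 96x96 latent grid (768 // 64 = 12, then 12 * 8 = 96), i.e. the function
# returns (96, 96); sizes that are not multiples of 64 are rounded up.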
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 68
|
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
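# Worked example (illustrative; `sum_of_divisors` is the name chosen above for the
# deobfuscated function): 28 is a perfect number, its proper divisors
# 1 + 2 + 4 + 7 + 14 sum back to 28, so sum_of_divisors(28) == 28.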
| 2
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Find the component index of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagate a new component index throughout a given component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attach the smaller of the two components to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Perform Boruvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """The doctest body of this helper did not survive extraction; left as a stub."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
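# --- Usage sketch (illustrative addition) ---
# Build a small weighted graph and run Boruvka's algorithm; the method prints each
# edge it adds and the total MST weight (here edges (2,3), (0,3), (0,1), total 19).
#
#     g = Graph(4)
#     for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#         g.add_edge(u, v, w)
#     g.boruvka()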
| 707
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 548
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 575
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__lowercase : Tuple = tokenizer(lowercase__ , return_tensors="np" , truncation=lowercase__ , max_length=5_1_2 , padding=lowercase__ )
__lowercase : Tuple = model.generate(**lowercase__ , num_beams=2 ).sequences
__lowercase : str = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ )
assert tgt_text == decoded
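# Standalone sketch of the integration test above (not part of the test suite; the call pattern
# mirrors the generate/batch_decode usage in the test, everything else is assumed):
#
#   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   inputs = tokenizer([article], return_tensors="np", truncation=True, max_length=512, padding=True)
#   summary_ids = model.generate(**inputs, num_beams=2).sequences
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))  # `article` is any long string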
| 575
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Optional[int] , *_lowercase :Tuple , **_lowercase :Optional[Any] ):
'''simple docstring'''
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
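# Migration sketch for the deprecation warning above (assumption: DPTImageProcessor follows the
# standard image-processor API; the checkpoint name is illustrative only):
#
#   from transformers import DPTImageProcessor
#   image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   inputs = image_processor(images=image, return_tensors="pt")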
| 611
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ['image_processor', 'tokenizer']
__lowerCamelCase = 'BridgeTowerImageProcessor'
__lowerCamelCase = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self :int , _lowercase :str , _lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
def __call__( self :Union[str, Any] , _lowercase :List[str] , _lowercase :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowercase :bool = True , _lowercase :Union[bool, str, PaddingStrategy] = False , _lowercase :Union[bool, str, TruncationStrategy] = None , _lowercase :Optional[int] = None , _lowercase :int = 0 , _lowercase :Optional[int] = None , _lowercase :Optional[bool] = None , _lowercase :Optional[bool] = None , _lowercase :bool = False , _lowercase :bool = False , _lowercase :bool = False , _lowercase :bool = False , _lowercase :bool = True , _lowercase :Optional[Union[str, TensorType]] = None , **_lowercase :int , ):
'''simple docstring'''
lowercase__ = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_token_type_ids=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
# add pixel_values + pixel_mask
lowercase__ = self.image_processor(
_lowercase , return_tensors=_lowercase , do_normalize=_lowercase , do_center_crop=_lowercase , **_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase ( self :List[Any] , *_lowercase :List[str] , **_lowercase :Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase ( self :Union[str, Any] , *_lowercase :Any , **_lowercase :Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
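# Usage sketch for the processor above (hedged: the wrapper class name and checkpoint are assumed,
# inferred from the 'BridgeTowerImageProcessor' attribute; `image` is a PIL.Image):
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(image, "an image caption", return_tensors="pt")
#   # `encoding` merges tokenizer outputs (input_ids, attention_mask, ...) with pixel_values/pixel_mask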
| 611
| 1
|
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
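# Worked example of the tables above: distilling a 12-layer teacher (e.g. BART) into a 3-layer
# student copies teacher layers LAYERS_TO_COPY[12][3] == [0, 6, 11] (the first, a middle, and the
# last layer), while LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11] lists the teacher layers each student
# layer is matched against under intermediate supervision.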
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student of the same type as `teacher`, keeping e encoder layers and d decoder layers."""
    _msg = "encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
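# CLI sketch (hedged: the script file name and flag spelling are assumed; fire maps keyword
# arguments to flags, and the teacher checkpoint below is illustrative):
#
#   python make_student.py facebook/bart-large-cnn --save_path student_dir --e 6 --d 3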
| 405
|
"""Mosaic data augmentation: stitch four labelled images into one YOLO-format training sample."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Read the dataset, build NUMBER_IMAGES mosaics, and save images plus annotations."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and per-image box lists from a YOLO-format dataset."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Compose one mosaic from four images and rescale their boxes into the shared frame."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random string of `number_char` lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 161
| 0
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given elements and return the head node."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Recursively print the list contents in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
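# Expected output of main() above for the sample list [14, 52, 14, 12, 43]:
#
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14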
| 705
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
a_ = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
a_ = {}
a_ = os.path.join(UpperCamelCase , """all_results.json""" )
if os.path.exists(UpperCamelCase ):
with open(UpperCamelCase , """r""" ) as f:
a_ = json.load(UpperCamelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def __SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
a_ = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
@classmethod
def __magic_name__ ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
a_ = tempfile.mkdtemp()
a_ = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
a_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __magic_name__ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
a_ = 7 if get_gpu_count() > 1 else 2
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def __magic_name__ ( self ):
a_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
        # The base model scores about 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 403
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _snake_case ( A__ ):
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
class _snake_case ( nn.Module ):
_lowercase : int
_lowercase : Tuple[int] = (16, 32, 96, 2_56)
_lowercase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels) - 1):
SCREAMING_SNAKE_CASE = self.block_out_channels[i]
SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
SCREAMING_SNAKE_CASE = blocks
SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a) -> Any:
SCREAMING_SNAKE_CASE = self.conv_in(a)
SCREAMING_SNAKE_CASE = nn.silu(a)
for block in self.blocks:
SCREAMING_SNAKE_CASE = block(a)
SCREAMING_SNAKE_CASE = nn.silu(a)
SCREAMING_SNAKE_CASE = self.conv_out(a)
return embedding
@flax_register_to_config
class _snake_case ( nn.Module , A__ , A__ ):
_lowercase : int = 32
_lowercase : int = 4
_lowercase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_lowercase : Union[bool, Tuple[bool]] = False
_lowercase : Tuple[int] = (3_20, 6_40, 12_80, 12_80)
_lowercase : int = 2
_lowercase : Union[int, Tuple[int]] = 8
_lowercase : Optional[Union[int, Tuple[int]]] = None
_lowercase : int = 12_80
_lowercase : float = 0.0
_lowercase : bool = False
_lowercase : jnp.dtype = jnp.floataa
_lowercase : bool = True
_lowercase : int = 0
_lowercase : str = "rgb"
_lowercase : Tuple[int] = (16, 32, 96, 2_56)
def SCREAMING_SNAKE_CASE__ ( self , a) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE = jnp.zeros(a , dtype=jnp.floataa)
SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa)
SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE = jnp.zeros(a , dtype=jnp.floataa)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(a)
SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
return self.init(a , a , a , a , a)["params"]
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.block_out_channels
SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(a , dtype=self.dtype)
SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(a , a):
SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types)
if isinstance(a , a):
SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types)
# down
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = block_out_channels[0]
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
for i, down_block_type in enumerate(self.down_block_types):
SCREAMING_SNAKE_CASE = output_channel
SCREAMING_SNAKE_CASE = block_out_channels[i]
SCREAMING_SNAKE_CASE = i == len(a) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a)
for _ in range(self.layers_per_block):
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
if not is_final_block:
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
SCREAMING_SNAKE_CASE = down_blocks
SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE = block_out_channels[-1]
SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a , a , a , a , a = 1.0 , a = True , a = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE = jnp.flip(a , axis=1)
# 1. time
if not isinstance(a , jnp.ndarray):
SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(a , jnp.ndarray) and len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa)
SCREAMING_SNAKE_CASE = jnp.expand_dims(a , 0)
SCREAMING_SNAKE_CASE = self.time_proj(a)
SCREAMING_SNAKE_CASE = self.time_embedding(a)
# 2. pre-process
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
SCREAMING_SNAKE_CASE = self.conv_in(a)
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(a)
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(a , a):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = down_block(a , a , a , deterministic=not train)
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = down_block(a , a , deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE = self.mid_block(a , a , a , deterministic=not train)
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(a , self.controlnet_down_blocks):
SCREAMING_SNAKE_CASE = controlnet_block(a)
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE = self.controlnet_mid_block(a)
# 6. scaling
SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a , mid_block_res_sample=a)
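# Initialization sketch (heavily hedged: the model class and its parameter-init method are
# anonymized above, so both identifiers below are hypothetical stand-ins for them):
#
#   controlnet = FlaxControlNetModel()                       # hypothetical name for the class above
#   params = controlnet.init_weights(jax.random.PRNGKey(0))  # the FrozenDict-returning method above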
| 73
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class lowerCAmelCase_ ( a_ ):
__UpperCAmelCase = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCAmelCase = Features({'image': Image()} )
__UpperCAmelCase = Features({'labels': ClassLabel} )
__UpperCAmelCase = "image"
__UpperCAmelCase = "labels"
def __snake_case ( self : int, _snake_case : Union[str, Any] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column], _snake_case ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case : List[str] =copy.deepcopy(self )
snake_case : List[str] =self.label_schema.copy()
snake_case : int =features[self.label_column]
snake_case : List[Any] =label_schema
return task_template
@property
def __snake_case ( self : str ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
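# Usage sketch (hedged: the template class and alignment method are anonymized above, so both
# names below are hypothetical stand-ins): aligning the template with a dataset's features swaps
# the placeholder ClassLabel for the dataset's real one, as the method above implements.
#
#   template = ImageClassification(image_column="image", label_column="labels")
#   template = template.align_with_features(dataset.features)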
| 349
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=7 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Optional[Any]=18 , __lowerCAmelCase : List[Any]=30 , __lowerCAmelCase : Any=400 , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : int=None , ) -> Tuple:
lowercase_ = size if size is not None else {"shortest_edge": 20}
lowercase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = min_resolution
lowercase_ = max_resolution
lowercase_ = do_resize
lowercase_ = size
lowercase_ = do_center_crop
lowercase_ = crop_size
def __UpperCAmelCase ( self : str) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( __lowerCamelCase , unittest.TestCase ):
lowerCamelCase_ =MobileNetVaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[str]) -> str:
lowercase_ = MobileNetVaImageProcessingTester(self)
@property
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Dict) -> Any:
lowercase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , "do_resize"))
self.assertTrue(hasattr(__lowerCAmelCase , "size"))
self.assertTrue(hasattr(__lowerCAmelCase , "do_center_crop"))
self.assertTrue(hasattr(__lowerCAmelCase , "crop_size"))
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def __UpperCAmelCase ( self : int) -> str:
pass
def __UpperCAmelCase ( self : Any) -> Dict:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__lowerCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase ( self : int) -> List[Any]:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__lowerCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase ( self : int) -> Dict:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__lowerCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 461
|
"""Word Break: decide whether a string can be segmented into a sequence of dictionary words."""
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be split into a concatenation of `words` entries."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
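# Examples (classic word-break instances):
#
#   word_break("applepenapple", ["apple", "pen"])                   # -> True
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # -> False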
| 461
| 1
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Optional[Any] ):
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = eval_examples
SCREAMING_SNAKE_CASE : List[Any] = post_process_function
SCREAMING_SNAKE_CASE : Any = quant_trainer_args
SCREAMING_SNAKE_CASE : Optional[Any] = 128 # default number of calibration samples
def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
SCREAMING_SNAKE_CASE : str = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE : str = self._remove_unused_columns(UpperCAmelCase_ , description="Calibration" )
return DataLoader(
UpperCAmelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase_ , )
def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[int]=None ):
SCREAMING_SNAKE_CASE : Any = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_calib_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.model
quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args , calib=UpperCAmelCase_ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase_ )
logger.info("***** Running calibration *****" )
logger.info(f''' Num examples = {self.calib_num}''' )
logger.info(f''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(UpperCAmelCase_ ):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.prediction_step(UpperCAmelCase_ , UpperCAmelCase_ , prediction_loss_only=UpperCAmelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : Optional[int] = model
def _A ( self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str = "eval" ):
SCREAMING_SNAKE_CASE : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Tuple = self.get_eval_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
UpperCAmelCase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , )
finally:
SCREAMING_SNAKE_CASE : int = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions )
SCREAMING_SNAKE_CASE : Any = self.compute_metrics(UpperCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = metrics.pop(UpperCAmelCase_ )
self.log(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase_ )
return metrics
def _A ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str = "test" ):
SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(UpperCAmelCase_ )
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : str = eval_loop(
UpperCAmelCase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , )
finally:
SCREAMING_SNAKE_CASE : Union[str, Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : Optional[Any] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions , "predict" )
SCREAMING_SNAKE_CASE : str = self.compute_metrics(UpperCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : str = metrics.pop(UpperCAmelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : int="./" ):
SCREAMING_SNAKE_CASE : List[Any] = self.eval_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_eval_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = next(iter(UpperCAmelCase_ ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE : int = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
SCREAMING_SNAKE_CASE : Tuple = tuple(v.to(UpperCAmelCase_ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Any = self.model.to(UpperCAmelCase_ )
model.eval()
model.float()
SCREAMING_SNAKE_CASE : str = model.module if hasattr(UpperCAmelCase_ , "module" ) else model
quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCAmelCase_ , "model.onnx" )
logger.info(f'''exporting model to {output_model_file}''' )
SCREAMING_SNAKE_CASE : int = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , export_params=UpperCAmelCase_ , opset_version=13 , do_constant_folding=UpperCAmelCase_ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=UpperCAmelCase_ , )
logger.info("onnx export finished" )
| 62
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {}
with open(lowerCAmelCase_ , '''r''' ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = line.strip()
if line:
SCREAMING_SNAKE_CASE__ = line.split()
SCREAMING_SNAKE_CASE__ = line_number
SCREAMING_SNAKE_CASE__ = words[0]
SCREAMING_SNAKE_CASE__ = value
return result
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE__ = value[0]
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE__ = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
SCREAMING_SNAKE_CASE__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE__ = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ = '''.'''.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE__ = key
SCREAMING_SNAKE_CASE__ = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_A : List[str] = parser.parse_args()
_A : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
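# ---------------------------------------------------------------------------
# Illustrative only: a minimal sketch of driving the converter above directly
# from Python instead of the CLI. All paths below are placeholders (not real
# files), and fairseq must be installed for the checkpoint loading to work.
#
# convert_wavaveca_checkpoint(
#     checkpoint_path="/path/to/wav2vec_small_960h.pt",   # fairseq checkpoint (placeholder)
#     pytorch_dump_folder_path="/path/to/output_dir",     # must be an existing directory; vocab.json is written here
#     dict_path="/path/to/dict.ltr.txt",                  # fairseq dictionary of the fine-tuned model
#     is_finetuned=True,
# )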
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor into a single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        # Pads the 2D points and labels to the maximum number of points in the batch.
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        # Expects coordinates with length 2 in the final dimension and the original image size in (H, W) format.
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
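# Note on the normalization above (descriptive sketch, not part of the original
# class): a prompt point (x, y) given in an image of original size (old_h, old_w)
# is rescaled to (x * new_w / old_w, y * new_h / old_h), where (new_h, new_w)
# comes from resizing the longest edge to `target_size`; padded points use
# `point_pad_value` (-10) so they can be masked out downstream.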
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
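# Quick illustration of the key remapping performed above (hypothetical key
# names, chosen only to show the pattern):
#   "attentions.0.q_lin.weight" -> "transformer.attentions.0.q_lin.weight"
#   "pred_layer.proj.weight"    -> kept as-is (the prediction head stays top-level)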
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
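# Self-contained sketch of the split performed above, with toy sizes (not the
# real OPT dimensions): a fused projection of shape (3 * hidden, hidden) is cut
# into three equal chunks along dim 0; per the source comment, the chunks come
# out in K, V, Q order despite the "qkv" naming.
#
# fused = torch.arange(12 * 4, dtype=torch.float32).reshape(12, 4)  # hidden = 4
# k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
# assert q.shape == k.shape == v.shape == (4, 4)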
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """
    Contains the full SHA-1 hashing pipeline for a bytestring.
    """

    def __init__(self, data):
        self.data = data
        # Initial message digest: five 32-bit words from the SHA-1 specification.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message so its length is a multiple of 64 bytes (512 bits).
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded data into 64-byte blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Unpack a 64-byte block into 16 words and expand to 80 words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        # Pad, split into blocks and run 80 rounds per block; the running digest
        # in self.h is updated after each block and formatted as hex at the end.
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
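# Hand check of the padding rule used in SHA1Hash.padding() (a sketch derived
# from the SHA-1 spec, not from the class): an 11-byte message gets the 0x80
# marker, 44 zero bytes, and an 8-byte big-endian bit length -> one 64-byte block.
assert 11 + 1 + (63 - (11 + 8) % 64) + 8 == 64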
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCamelCase = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
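# Sketch of what reshape_weight_for_sd does, with a dummy tensor (the 320-dim
# size below is hypothetical, chosen only for illustration): a linear weight of
# shape (out, in) becomes a 1x1 conv kernel of shape (out, in, 1, 1), which is
# the layout the original stable-diffusion format expects for the VAE
# mid-block attention projections.
#
# w = torch.zeros(320, 320)
# assert reshape_weight_for_sd(w).shape == (320, 320, 1, 1)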
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Adds two bits plus a carry bit on a 4-qubit circuit and returns the
    measurement counts of the (sum, carry) qubits; an input of 2 prepares the
    corresponding qubit in superposition with a Hadamard gate.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """
    Returns the number of lattice paths on an n x n grid, i.e. the central
    binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
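# Worked check of the closed form above: for n = 4 the grid has C(8, 4) paths,
# and 8! / (4! * 4!) = 40320 / (24 * 24) = 70, so solution(4) == 70.
assert solution(4) == 70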
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    # Yields the Fibonacci numbers 1, 2, 3, 5, 8, ...
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # Returns the index of the first Fibonacci term to contain n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
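# Spot check (by hand from F1 = 1, F2 = 1, F3 = 2, ...): the first Fibonacci
# number with three digits is F12 = 144, so solution(3) returns 12.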
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
                 fft_window_size=1024, padding_value=0.0, return_attention_mask=False,
                 frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
                 truncation: str = "fusion", padding: str = "repeatpad", **kwargs) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
                         return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size,
            hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None,
                 max_length: Optional[int] = None, sampling_rate: Optional[int] = None,
                 return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
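# Note on the two truncation modes above (descriptive sketch): "rand_trunc"
# crops a random max_length window and yields a single mel spectrogram, while
# "fusion" stacks four mels (a bilinearly shrunk global view plus random
# front/middle/back chunks) and sets is_longer=True so the model knows that
# feature fusion was applied.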
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset that stops at a random length.
    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class BatchSamplerShardTest(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __snake_case ( self : Optional[Any] ):
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but
        # the number of batches is a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and
        # the number of batches is not a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but
        # the number of batches is a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and
        # the number of batches is not a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase__ = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()  # initializes the distributed state used by the dispatcher
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
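# Hedged usage sketch of BatchSamplerShard outside the test harness; the keyword names
# num_processes/process_index mirror the positional arguments used above and the expected
# outputs are the ones hard-coded in the first test.
sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, num_processes=2, process_index=i) for i in range(2)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]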
| 288
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Return the next reflection point on the ellipse 4x^2 + y^2 = 100 and the
    gradient of the outgoing beam."""
    # normal gradient of the ellipse at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # sine and cosine of the doubled normal angle, used for the reflection
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count how many times the beam reflects inside the ellipse before exiting
    through the gap at the top (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
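    # Quick numeric sanity check, assuming next_point above: starting from the first
    # impact point, every subsequent reflection must lie on the ellipse 4x^2 + y^2 = 100.
    x, y = 1.4, -9.6
    m = (10.1 - y) / (0.0 - x)
    for _ in range(3):
        x, y, m = next_point(x, y, m)
        assert isclose(4 * x * x + y * y, 100.0, rel_tol=1e-6)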
| 288
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
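# Hedged usage sketch for the ONNX config above; the checkpoint name and the printed
# key set are assumptions for illustration, not taken from this file.
from transformers import AutoTokenizer, TensorType

config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=5, framework=TensorType.PYTORCH
)
print(list(dummy.keys()))  # expected: ['input_ids', 'past_key_values', 'attention_mask']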
| 33
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase_ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
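    # The conversion boils down to copying state dicts module-by-module; a minimal
    # hedged sketch of that pattern with toy modules (names are illustrative):
    src = nn.Linear(4, 2)
    dst = nn.Linear(4, 2)
    dst.load_state_dict(src.state_dict())  # exact copy: keys and shapes must match
    assert torch.equal(src.weight, dst.weight)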
| 235
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a SwinConfig from a timm checkpoint name such as swin_tiny_patch4_window7_224."""
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
"""simple docstring"""
if "patch_embed.proj" in name:
_A : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
if "patch_embed.norm" in name:
_A : str = name.replace('''patch_embed.norm''' , '''embeddings.norm''')
if "layers" in name:
_A : Optional[int] = '''encoder.''' + name
if "attn.proj" in name:
_A : List[Any] = name.replace('''attn.proj''' , '''attention.output.dense''')
if "attn" in name:
_A : Union[str, Any] = name.replace('''attn''' , '''attention.self''')
if "norm1" in name:
_A : Dict = name.replace('''norm1''' , '''layernorm_before''')
if "norm2" in name:
_A : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''')
if "mlp.fc1" in name:
_A : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''')
if "mlp.fc2" in name:
_A : List[str] = name.replace('''mlp.fc2''' , '''output.dense''')
if name == "norm.weight":
_A : Tuple = '''layernorm.weight'''
if name == "norm.bias":
_A : Optional[Any] = '''layernorm.bias'''
if "head" in name:
_A : List[Any] = name.replace('''head''' , '''classifier''')
else:
_A : Tuple = '''swin.''' + name
return name
def convert_state_dict(orig_state_dict, model):
    """Split fused timm qkv tensors and rename all remaining keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a timm Swin checkpoint, verify the outputs, and save the HF model."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
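    # The qkv split above in isolation: timm fuses query/key/value into one matrix,
    # while the HF checkpoint stores three tensors. A hedged toy sketch:
    dim = 8
    qkv_weight = torch.randn(3 * dim, dim)  # fused timm layout
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value]), qkv_weight)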
| 716
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
"""simple docstring"""
def __init__( self ) -> List[str]:
# test for the above condition
self.test()
    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")
    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Forces the generation to contain an ordered sequence of token ids."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=True ) -> Optional[int]:
_A : int = max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
_A : int = {}
for token_ids in nested_token_ids:
_A : Any = root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
_A : Tuple = {}
_A : Optional[int] = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
_A : str = root
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> str:
_A : Tuple = self.trie
for current_token in current_seq:
_A : Any = start[current_token]
_A : List[Any] = list(start.keys() )
return next_tokens
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> List[str]:
_A : int = self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Union[str, Any]:
_A : Dict = list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
_A : Dict = self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class DisjunctiveConstraint(Constraint):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ ) -> Any:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_A : List[str] = DisjunctiveTrie(UpperCAmelCase__ )
_A : List[Any] = nested_token_ids
_A : Tuple = self.trie.max_height
_A : Any = []
_A : List[str] = False
def _lowerCamelCase ( self ) -> List[Any]:
_A : List[str] = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Dict:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}""" )
_A : Dict = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> int:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}""" )
_A : int = False
_A : Tuple = False
_A : List[Any] = False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
_A : Tuple = True
else:
_A : str = True
self.reset()
_A : Optional[int] = self.trie.reached_leaf(self.current_seq )
_A : List[str] = completed
return stepped, completed, reset
def _lowerCamelCase ( self ) -> List[Any]:
_A : str = False
_A : Any = []
def _lowerCamelCase ( self ) -> Any:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def _lowerCamelCase ( self , UpperCAmelCase__=False ) -> Union[str, Any]:
_A : int = DisjunctiveConstraint(self.token_ids )
if stateful:
_A : Optional[Any] = self.seqlen
_A : Tuple = self.current_seq
_A : List[str] = self.completed
return new_constraint
class ConstraintListState:
"""simple docstring"""
def __init__( self , UpperCAmelCase__ ) -> int:
_A : Tuple = constraints
# max # of steps required to fulfill a given constraint
_A : List[Any] = max([c.seqlen for c in constraints] )
_A : Optional[int] = len(UpperCAmelCase__ )
_A : int = False
self.init_state()
def _lowerCamelCase ( self ) -> Any:
_A : Optional[Any] = []
_A : Any = None
_A : List[Any] = [constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def _lowerCamelCase ( self ) -> List[str]:
_A : Optional[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _lowerCamelCase ( self ) -> Dict:
_A : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_A : Dict = constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
_A : Tuple = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> List[str]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
                complete, stepped = self.add(token)
# the entire list of constraints are fulfilled
if self.completed:
break
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Tuple:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
        complete, stepped = False, False
if self.completed:
_A : Optional[Any] = True
_A : str = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_A , _A , _A : Optional[Any] = self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
_A : List[Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_A : List[Any] = None
if len(self.pending_constraints ) == 0:
# we're done!
_A : List[str] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
_A , _A , _A : Tuple = pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
_A : Any = None
if not complete and stepped:
_A : List[str] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_A : Optional[int] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_A : Tuple = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _lowerCamelCase ( self , UpperCAmelCase__=True ) -> Optional[int]:
        new_state = ConstraintListState(self.constraints)  # we never actually touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
_A : List[str] = [
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_A : int = self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
_A : Tuple = [constraint.copy() for constraint in self.pending_constraints]
return new_state
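# Hedged usage sketch of the PhrasalConstraint above; the token ids are illustrative.
constraint = PhrasalConstraint([5, 9, 3])
assert constraint.advance() == 5
stepped, completed, reset = constraint.update(5)
stepped, completed, reset = constraint.update(9)
stepped, completed, reset = constraint.update(3)
assert completed and constraint.remaining() == 0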
| 417
| 0
|
from collections.abc import Callable
class Heap:
    """A generic Heap class; with the identity key it behaves as a max-heap, and a
    negating key turns it into a min-heap."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the scored values of the two items; the heapify logic inverts this,
        so with the identity key larger values bubble to the top."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns index of a valid parent as per the desired ordering among the given
        index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and re-heapify.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item pair [item, scored value] from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item pair and removes it from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
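    # Hedged usage sketch of the Heap above: with the identity key it behaves as a
    # max-heap, so the largest stored score surfaces first.
    heap = Heap()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    print(heap.get_top())      # [7, 37]
    print(heap.extract_top())  # [7, 37]
    print(heap.get_top())      # [5, 34]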
| 455
|
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter; assumes float samples normalised on [-1, 1]."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; a_0 may be omitted, in which case it defaults to 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] using the filter's difference equation."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
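# Hedged usage sketch: with a = b = [1, 0, 0] the 2nd-order filter is an identity
# pass-through, so each sample comes back unchanged.
filt = IIRFilter(2)
filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
assert filt.process(0.5) == 0.5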
| 455
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
a__: Optional[int] = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Build (input_ids, mc_token_ids, lm_labels, mc_labels) tensors, one tuple per dataset,
    stacking the two candidate continuations on axis 1 and masking padding with -100."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
A__ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCamelCase__ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=UpperCamelCase__ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=UpperCamelCase__ , default='''''' )
parser.add_argument('''--seed''' , type=UpperCamelCase__ , default=42 )
parser.add_argument('''--num_train_epochs''' , type=UpperCamelCase__ , default=3 )
parser.add_argument('''--train_batch_size''' , type=UpperCamelCase__ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=UpperCamelCase__ , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=UpperCamelCase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=UpperCamelCase__ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=UpperCamelCase__ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=UpperCamelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=UpperCamelCase__ , default=6.2_5e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=UpperCamelCase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=UpperCamelCase__ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=UpperCamelCase__ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=UpperCamelCase__ , default=0.9 )
parser.add_argument('''--n_valid''' , type=UpperCamelCase__ , default=3_74 )
parser.add_argument('''--server_ip''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
A__ = parser.parse_args()
print(UpperCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
A__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
A__ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(UpperCamelCase__ , UpperCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ['''_start_''', '''_delimiter_''', '''_classify_''']
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
A__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(UpperCamelCase__ )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*UpperCamelCase__ )
A__ = RandomSampler(UpperCamelCase__ )
A__ = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size )
A__ = TensorDataset(*UpperCamelCase__ )
A__ = SequentialSampler(UpperCamelCase__ )
A__ = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A__ = args.max_steps
A__ = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
A__ = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
A__ = list(model.named_parameters() )
A__ = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
A__ = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
A__ = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon )
A__ = get_linear_schedule_with_warmup(
UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
A__ = 0
A__ = 0
A__ = tqdm(UpperCamelCase__ , desc='''Training''' )
for step, batch in enumerate(UpperCamelCase__ ):
A__ = tuple(t.to(UpperCamelCase__ ) for t in batch )
A__ , A__ , A__ , A__ = batch
A__ = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
A__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
A__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A__ = '''Training loss: {:.2e} lr: {:.2e}'''.format(UpperCamelCase__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A__ = model.module if hasattr(UpperCamelCase__ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
torch.save(model_to_save.state_dict() , UpperCamelCase__ )
model_to_save.config.to_json_file(UpperCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase__ )
if args.do_eval:
model.eval()
A__ , A__ = 0, 0
A__ , A__ = 0, 0
for batch in tqdm(UpperCamelCase__ , desc='''Evaluating''' ):
A__ = tuple(t.to(UpperCamelCase__ ) for t in batch )
A__ , A__ , A__ , A__ = batch
with torch.no_grad():
A__ , A__ , A__ , A__ = model(
UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
A__ = mc_logits.detach().cpu().numpy()
A__ = mc_labels.to('''cpu''' ).numpy()
A__ = accuracy(UpperCamelCase__ , UpperCamelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
A__ = eval_loss / nb_eval_steps
A__ = eval_accuracy / nb_eval_examples
A__ = tr_loss / nb_tr_steps if args.do_train else None
A__ = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
A__ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , UpperCamelCase__ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
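# A minimal invocation sketch (my addition; the flags follow the argparse setup
# defined earlier in this script, and the dataset paths are placeholders):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#       --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#       --output_dir ../log \
#       --train_batch_size 16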
| 705
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end by summing
    the lengths of `steps` straight segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
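# Quick sanity check (my addition, not part of the original module): the arc
# length of the straight line f(x) = x from 0 to 1 is sqrt(2) ~ 1.4142135...,
# and the piecewise-linear approximation is exact for any number of segments:
#
#   line_length(lambda x: x, 0, 1, 10)  # -> 1.4142135623730951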
| 212
| 0
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product and return that product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
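# Sanity note (my addition): with the 1000-digit constant above, the widely
# published answer to Project Euler problem 8 is 23514624000, i.e.
#
#   assert solution() == 23514624000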
| 35
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
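# Run note (my addition; the pytest path assumes the usual transformers test layout):
#
#   python -m pytest tests/models/trocr/test_modeling_trocr.py -k "decoder_model_past"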
| 35
| 1
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak RSS of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (reported in MiB, hence the division by 2**20)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
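# A minimal usage sketch (my addition): bracket any workload with start_measure()
# and end_measure(), then pretty-print the time/memory deltas.
#
#   start = start_measure()
#   _ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)
#   measures = end_measure(start)
#   log_measures(measures, "1024x1024 matmul")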
| 25
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER metrics and write them (and optionally all outputs) to text files."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
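# Example invocation (my addition; the script name, model id and dataset id are
# placeholders to be adapted):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --log_outputs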
| 25
| 1
|
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited those vertices yet;
    # the extra slot lets the path close back on the starting vertex
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
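# Example (my addition): a 5-vertex graph that contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]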
| 110
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Run generation on the input file and optionally score the outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_a = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 705
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
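# Run note (my addition): the @slow integration tests above are skipped by default
# in the transformers test suite and only run when RUN_SLOW is set, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_tf_esm.py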
| 29
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
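# A short instantiation sketch (my addition; values are the illustrative defaults):
#
#   configuration = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#   assert configuration.hidden_size == 768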
| 526
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
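# A minimal usage sketch (my addition): BaseImageProcessor routes __call__ to
# preprocess, so the processor can be applied directly to a PIL image.
#
#   image_processor = CLIPImageProcessor()
#   inputs = image_processor(images=pil_image, return_tensors="np")
#   print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)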
| 280
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit (initially in state |0>) and return the histogram of counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 280
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
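# A short sketch of the nested-config pattern (my addition; illustrative values):
#
#   vision_config = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
#   assert config.vision_config.patch_size == 16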
| 56
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
modules_on_cpu = []
# custom device map
if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(modules_on_cpu )
modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
bnb_quantization_config.keep_in_fpaa_modules = []
keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
model.is_loaded_in_4bit = load_in_4bit
model.is_loaded_in_8bit = load_in_8bit
model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
dtype = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.float32 )
if param.dtype != torch.float32:
name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
param = getattr(model , name , None )
if param is not None:
param.to(torch.float32 )
elif torch.is_floating_point(param ):
param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
''' We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
model = replace_with_bnb_layers(
model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
device_map = get_quantized_model_device_map(
model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
offload_state_dict = True
offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
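# Usage sketch (illustrative; the checkpoint path and model class are hypothetical,
# `BnbQuantizationConfig` is the dataclass imported above):
#   from accelerate import init_empty_weights
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   quantized_model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/checkpoint",
#       device_map="auto",
#   )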
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
kwargs = {}
kwargs['''special_dtypes'''] = special_dtypes
kwargs['''no_split_module_classes'''] = no_split_module_classes
kwargs['''dtype'''] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
max_memory = get_balanced_memory(
model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
kwargs['''max_memory'''] = max_memory
device_map = infer_auto_device_map(model , **kwargs )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
device_map_without_some_modules = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
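# Illustrative custom `device_map` (module names are hypothetical): quantized
# blocks on GPU 0, one module offloaded to CPU. As enforced above, CPU/disk
# placement raises for 4-bit loading and is only logged for 8-bit loading:
#   device_map = {
#       "transformer.word_embeddings": 0,
#       "transformer.h": 0,
#       "lm_head": "cpu",
#   }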
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
if modules_to_not_convert is None:
modules_to_not_convert = []
model , has_been_replaced = _replace_with_bnb_layers(
model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
has_been_replaced = False
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name )
if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
current_key_name_str = '''.'''.join(current_key_name )
proceed = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
proceed = False
break
if proceed:
# Load bnb module with empty weight and replace `nn.Linear` module
if bnb_quantization_config.load_in_8bit:
bnb_module = bnb.nn.Linear8bitLt(
module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
elif bnb_quantization_config.load_in_4bit:
bnb_module = bnb.nn.Linear4bit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
bnb_module.weight.data = module.weight.data
if module.bias is not None:
bnb_module.bias.data = module.bias.data
bnb_module.requires_grad_(False )
setattr(model , name , bnb_module )
has_been_replaced = True
if len(list(module.children() ) ) > 0:
_ , _has_been_replaced = _replace_with_bnb_layers(
module , bnb_quantization_config , modules_to_not_convert , current_key_name )
has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ):
# Create a copy of the model
with init_empty_weights():
tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager
tied_params = find_tied_parameters(tied_model )
# For compatibility with Accelerate < 0.18
if isinstance(tied_params , dict ):
tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
tied_keys = sum(tied_params , [] )
has_tied_params = len(tied_keys ) > 0
# Check if it is a base model
is_base_model = False
if hasattr(model , '''base_model_prefix''' ):
is_base_model = not hasattr(model , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
list_modules = list(model.named_children() )
list_last_module = [list_modules[-1][0]]
# add last module together with tied weights
intersection = set(list_last_module ) - set(tied_keys )
list_untouched = list(set(tied_keys ) ) + list(intersection )
# remove ".weight" from the keys
names_to_remove = ['''.weight''', '''.bias''']
filtered_module_names = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
name = name.replace(name_to_remove , '''''' )
filtered_module_names.append(name )
return filtered_module_names
def has_4bit_bnb_layers( model ):
for m in model.modules():
if isinstance(m , bnb.nn.Linear4bit ):
return True
return False
def get_parameter_device( parameter ):
return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fp16_statistics is None:
set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
tensor_name = param_name
module = model
if "." in tensor_name:
splits = tensor_name.split('''.''' )
for split in splits[:-1]:
new_module = getattr(module , split )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
module = new_module
tensor_name = splits[-1]
# offload weights
module._parameters[tensor_name].requires_grad = False
offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
else:
offload_weight(param , param_name , offload_folder , index=offload_index )
offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 90
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _lowerCAmelCase( ) -> List[str]:
node_count, edge_count = 9, 14 # noqa: F841
lowerCAmelCase__ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
adjacency = defaultdict(list )
for node1, node2, cost in edges:
adjacency[node1].append([node2, cost] )
adjacency[node2].append([node1, cost] )
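# For reference (derived from the edge list above): the graph is undirected, so
# each edge [u, v, w] yields two adjacency entries. After the first two edges:
#   adjacency[0] == [[1, 4], [7, 8]]
#   adjacency[1] == [[0, 4]]
#   adjacency[7] == [[0, 8]]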
result = mst(adjacency )
expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
edge = tuple(answer[:2] )
reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 704
|
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _info( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
'''simple docstring'''
score = f1_score(
references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
return {"f1": float(score ) if score.size == 1 else score}
| 211
| 0
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
# Initialise PyTorch model
config = T5Config.from_json_file(config_file )
print(F'''Building PyTorch model from configuration: {config}''' )
model = T5ForConditionalGeneration(config )
# Load weights from tf checkpoint
load_tf_weights_in_t5(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
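# Example invocation (illustrative; the script filename and paths are hypothetical):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/checkpoint \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/output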
| 412
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = AutoencoderKL
__SCREAMING_SNAKE_CASE : Optional[Any] = 'sample'
__SCREAMING_SNAKE_CASE : Any = 1e-2
@property
def dummy_input( self ):
'''simple docstring'''
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
def input_shape( self ):
'''simple docstring'''
return (3, 32, 32)
@property
def output_shape( self ):
'''simple docstring'''
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common( self ):
'''simple docstring'''
init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.model_class(**UpperCamelCase__ )
model.to(UpperCamelCase__ )
assert not model.is_gradient_checkpointing and model.training
lowercase_ = model(**UpperCamelCase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowercase_ = torch.randn_like(UpperCamelCase__ )
lowercase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase_ = self.model_class(**UpperCamelCase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCamelCase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase_ = model_a(**UpperCamelCase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowercase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowercase_ = dict(model.named_parameters() )
lowercase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
lowercase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
lowercase_ = model.to(UpperCamelCase__ )
model.eval()
if torch_device == "mps":
lowercase_ = torch.manual_seed(0 )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
lowercase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase_ = image.to(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , sample_posterior=UpperCamelCase__ , generator=UpperCamelCase__ ).sample
lowercase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase_ = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
lowercase_ = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase_ = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def get_file_format( self , seed , shape ):
'''simple docstring'''
return F'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
'''simple docstring'''
dtype = torch.float16 if fpaa else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
return image
def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ):
'''simple docstring'''
revision = """fp16""" if fpaa else None
torch_dtype = torch.float16 if fpaa else torch.float32
model = AutoencoderKL.from_pretrained(
model_id , subfolder="""vae""" , torch_dtype=torch_dtype , revision=revision , )
model.to(torch_device ).eval()
return model
def get_generator( self , seed=0 ):
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(seed )
return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ )
lowercase_ = self.get_generator(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , generator=UpperCamelCase__ , sample_posterior=UpperCamelCase__ ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCamelCase__ )
lowercase_ = self.get_sd_image(UpperCamelCase__ , fpaa=UpperCamelCase__ )
lowercase_ = self.get_generator(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ , generator=UpperCamelCase__ , sample_posterior=UpperCamelCase__ ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase_ = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCamelCase__ )
lowercase_ = self.get_sd_image(UpperCamelCase__ , shape=(3, 4, 64, 64) , fpaa=UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCamelCase__ )
lowercase_ = self.get_sd_image(UpperCamelCase__ , shape=(3, 4, 64, 64) , fpaa=UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCamelCase__ )
lowercase_ = self.get_generator(UpperCamelCase__ )
with torch.no_grad():
lowercase_ = model.encode(UpperCamelCase__ ).latent_dist
lowercase_ = dist.sample(generator=UpperCamelCase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase_ = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase_ = torch.tensor(UpperCamelCase__ )
lowercase_ = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=UpperCamelCase__ )
| 412
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config( swinv2_name ):
config = Swinv2Config()
name_split = swinv2_name.split("""_""" )
model_size = name_split[1]
if "to" in name_split[3]:
img_size = int(name_split[3][-3:] )
else:
img_size = int(name_split[3] )
if "to" in name_split[2]:
window_size = int(name_split[2][-2:] )
else:
window_size = int(name_split[2][6:] )
if model_size == "tiny":
embed_dim = 96
depths = (2, 2, 6, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "small":
embed_dim = 96
depths = (2, 2, 18, 2)
num_heads = (3, 6, 12, 24)
elif model_size == "base":
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
else:
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
if "to" in swinv2_name:
config.pretrained_window_sizes = (12, 12, 12, 6)
if ("22k" in swinv2_name) and ("to" not in swinv2_name):
num_classes = 21_841
repo_id = '''huggingface/label-files'''
filename = '''imagenet-22k-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
else:
num_classes = 1_000
repo_id = '''huggingface/label-files'''
filename = '''imagenet-1k-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.image_size = img_size
config.num_labels = num_classes
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
config.window_size = window_size
return config
def rename_key( name ):
if "patch_embed.proj" in name:
name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
name = '''encoder.''' + name
if "attn.proj" in name:
name = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
name = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
name = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
name = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
name = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
name = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
name = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
name = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
name = '''layernorm.weight'''
if name == "norm.bias":
name = '''layernorm.bias'''
if "head" in name:
name = name.replace("""head""" , """classifier""" )
else:
name = '''swinv2.''' + name
return name
def convert_state_dict( orig_state_dict , model ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
key_split = key.split(""".""" )
layer_num = int(key_split[1] )
block_num = int(key_split[3] )
dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
else:
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
dim : dim * 2
]
orig_state_dict[F"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def convert_swinv2_checkpoint( swinv2_name , pytorch_dump_folder_path ):
timm_model = timm.create_model(swinv2_name , pretrained=True )
timm_model.eval()
config = get_swinv2_config(swinv2_name )
model = Swinv2ForImageClassification(config )
model.eval()
new_state_dict = convert_state_dict(timm_model.state_dict() , model )
model.load_state_dict(new_state_dict )
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinv2_name.replace("""_""" , """-""" ) ) )
image = Image.open(requests.get(url , stream=True ).raw )
inputs = image_processor(images=image , return_tensors="""pt""" )
timm_outs = timm_model(inputs["""pixel_values"""] )
hf_outs = model(**inputs ).logits
assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
print(F"Saving model {swinv2_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(pytorch_dump_folder_path )
model.push_to_hub(
repo_path_or_name=Path(pytorch_dump_folder_path , swinv2_name ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
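# Example invocation (illustrative; the script filename and dump path are hypothetical):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny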
| 718
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Tuple=32 , lowercase : Optional[Any]=2 , lowercase : Tuple=3 , lowercase : Tuple=16 , lowercase : Tuple=[1, 2, 1] , lowercase : Optional[Any]=[2, 2, 4] , lowercase : Dict=2 , lowercase : Optional[int]=2.0 , lowercase : List[Any]=True , lowercase : str=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : Tuple=False , lowercase : Optional[Any]=True , lowercase : int=0.02 , lowercase : Union[str, Any]=1E-5 , lowercase : Dict=True , lowercase : Any=None , lowercase : str=True , lowercase : str=10 , lowercase : Dict=8 , lowercase : int=["stage1", "stage2", "stage3"] , lowercase : Optional[int]=[1, 2, 3] , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : List[str] ) -> int:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case__ ( self : Any , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Any , lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=lowercase )
model.to(lowercase )
model.eval()
__lowercase = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowercase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=lowercase )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : List[str] = False
lowercase__ : int = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase , lowercase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowercase ) , lowercase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowercase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(lowercase : Optional[int] , lowercase : str , lowercase : str , lowercase : Tuple={} ):
with torch.no_grad():
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase )
__lowercase = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : int , lowercase : Optional[Any] ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has"
F" `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}."
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase )
model.to(lowercase )
model.eval()
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
__lowercase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : Any = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(lowercase )
backbone.to(lowercase )
backbone.eval()
__lowercase = backbone(**lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**lowercase , output_hidden_states=lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**lowercase , output_attentions=lowercase )
self.assertIsNotNone(outputs.attentions )
| 634
| 0
|
'''simple docstring'''
def manhattan_distance( point_a : list , point_b : list ) -> float:
"""simple docstring"""
_validate_point(point_a )
_validate_point(point_b )
if len(point_a ) != len(point_b ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list ) -> None:
"""simple docstring"""
if point:
if isinstance(point , list ):
for item in point:
if not isinstance(item , (int, float) ):
msg = (
'Expected a list of numbers as input, found '
F'{type(item ).__name__}'
)
raise TypeError(msg )
else:
msg = F'Expected a list of numbers as input, found {type(point ).__name__}'
raise TypeError(msg )
else:
raise ValueError('Missing an input' )
def manhattan_distance_one_liner( point_a : list , point_b : list ) -> float:
"""simple docstring"""
_validate_point(point_a )
_validate_point(point_b )
if len(point_a ) != len(point_b ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
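# Illustrative usage (values chosen for this sketch): both variants agree.
#   manhattan_distance([1, 1], [2, 2])            -> 2.0
#   manhattan_distance_one_liner([1, 1], [2, 2])  -> 2.0
#   manhattan_distance([1.5, 2], [3, -2.25])      -> 5.75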
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path( model_type , use_small=False ):
"""simple docstring"""
key = model_type
if use_small:
key += "_small"
return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_path , file_name ):
"""simple docstring"""
os.makedirs(CACHE_DIR , exist_ok=True )
hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
"""simple docstring"""
if model_type == "text":
ModelClass = BarkSemanticModel
ConfigClass = BarkSemanticConfig
GenerationConfigClass = BarkSemanticGenerationConfig
elif model_type == "coarse":
ModelClass = BarkCoarseModel
ConfigClass = BarkCoarseConfig
GenerationConfigClass = BarkCoarseGenerationConfig
elif model_type == "fine":
ModelClass = BarkFineModel
ConfigClass = BarkFineConfig
GenerationConfigClass = BarkFineGenerationConfig
else:
raise NotImplementedError()
model_key = F'''{model_type}_small''' if use_small else model_type
model_info = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(ckpt_path ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['repo_id'] , model_info['file_name'] )
checkpoint = torch.load(ckpt_path , map_location=device )
# this is a hack
__UpperCamelCase = checkpoint['model_args']
if "input_vocab_size" not in model_args:
__UpperCamelCase = model_args['vocab_size']
__UpperCamelCase = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__UpperCamelCase = model_args.pop('n_head' )
__UpperCamelCase = model_args.pop('n_embd' )
__UpperCamelCase = model_args.pop('n_layer' )
__UpperCamelCase = ConfigClass(**checkpoint['model_args'] )
__UpperCamelCase = ModelClass(config=__lowercase )
__UpperCamelCase = GenerationConfigClass()
__UpperCamelCase = model_generation_config
__UpperCamelCase = checkpoint['model']
# fixup checkpoint
__UpperCamelCase = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(__lowercase ):
# replace part of the key with corresponding layer name in HF implementation
__UpperCamelCase = k[len(__lowercase ) :]
for old_layer_name in new_layer_name_dict:
__UpperCamelCase = new_k.replace(__lowercase , new_layer_name_dict[old_layer_name] )
__UpperCamelCase = state_dict.pop(__lowercase )
__UpperCamelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__UpperCamelCase = {k for k in extra_keys if not k.endswith('.attn.bias' )}
__UpperCamelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__UpperCamelCase = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(__lowercase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(__lowercase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(__lowercase , strict=__lowercase )
__UpperCamelCase = model.num_parameters(exclude_embeddings=__lowercase )
__UpperCamelCase = checkpoint['best_val_loss'].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowercase , 3 )} loss''' )
model.eval()
model.to(__lowercase )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codeboook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codeboook_channel, vec)
        output_old_model = bark_model(prediction_codeboook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
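    # Example invocation (illustrative only; the script file name and output path are assumptions):
    #   python convert_suno_to_hf.py text ./bark-semantic-small --is_small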
| 399
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # loop over windows of length len(qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
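# Minimal usage sketch (illustrative only; `model.params` stands for a hypothetical flax
# parameter tree with GPT-style layer names matching the rules above):
#   partition_spec = set_partitions(model.params)  # every leaf maps to a PartitionSpec or None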
| 692
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是",
            "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 692
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
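# Usage note (illustrative): with the lazy module in place, `from transformers.models.git
# import GitModel` only triggers the import of `modeling_git` on first attribute access.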
| 59
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
__A = logging.getLogger(__name__)
def load_and_quantize_model(
    model, bnb_quantization_config, weights_location=None, device_map=None,
    no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder, offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replaces all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit` modules."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None,
                        has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"),
                offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
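# Usage sketch (illustrative only; the empty model and weights path are assumptions):
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical torch.nn.Module
#   model = load_and_quantize_model(empty_model, bnb_config, weights_location="path/to/weights")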
| 59
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
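# Usage sketch (illustrative): a slimmer variant of the default configuration.
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)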
| 704
|
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of the given DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
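    # Usage sketch (not part of the original): the complement of "GCTA" is "CGAT".
    print(dna("GCTA"))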
| 73
| 0
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of a sentence or word."""
    if not sentence:
        return ""

    # Map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
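    # Usage sketch (not part of the original):
    print(capitalize("hello world"))  # -> "Hello world"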
| 667
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Finds a root of ``function`` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
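    # Additional sketch (not in the original): the same secant iteration applied to
    # x**2 - 2 converges to sqrt(2) from the starting points 1 and 2.
    print(intersection(lambda x: x * x - 2, 1.0, 2.0))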
| 667
| 1
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 712
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    # NOTE: the original class name was obfuscated in this dump; `LMSDiscreteScheduler` is the
    # torch+scipy dummy object this file most plausibly defined and should be read as a best guess.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 512
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
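# Usage sketch (illustrative): mapping a custom text column onto the canonical schema.
#   LanguageModeling(text_column="content").column_mapping  # -> {"content": "text"}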
| 601
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 601
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
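# Usage sketch (illustrative; checkpoint name is real, `raw_audio` is a hypothetical float array):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")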
| 721
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
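# Usage sketch (illustrative; the hyperparameters below are assumptions, not shipped defaults):
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1, num_layers=12,
#       num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#   )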
| 558
| 0
|