| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
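# Hedged usage sketch for the CLI above; the script file name and paths are
# illustrative placeholders, not taken from the original file:
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted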
| 669 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
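# Hedged usage sketch for the processor above (assumes network access and that the
# "facebook/flava-full" checkpoint is available; the image path is hypothetical):
# from PIL import Image
# from transformers import FlavaProcessor
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")
# print(inputs.keys())  # e.g. input_ids, token_type_ids, attention_mask, pixel_values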
| 297 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 548 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds the root of `function` from `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")

    # Find root of polynomial
    # Find fourth root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}""")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )

    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 548 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's important information for users
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (even though we don't have a training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
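# As the header comment notes, this script is meant to be run from the repo root:
#   python utils/check_config_attributes.py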
| 47 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
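# Hedged usage sketch: instantiating the config with the defaults above yields a
# roberta-base-sized architecture with randomly initialized weights.
# from transformers import RobertaConfig, RobertaModel
#
# config = RobertaConfig()  # vocab_size=50265, hidden_size=768, ...
# model = RobertaModel(config)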
| 47 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 276 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
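# Hedged usage sketch for the Extractor facade above (the archive and output
# paths are hypothetical placeholders):
# fmt = Extractor.infer_extractor_format("/path/to/archive.tar.gz")  # e.g. "gzip"
# if fmt:
#     Extractor.extract("/path/to/archive.tar.gz", "/tmp/extracted/archive", fmt)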
| 276 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 206 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
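# Hedged usage sketch for the CLI above; the script file name and output path are
# assumptions, not taken from the original file:
#   python convert_suno_to_hf.py text ./bark-text-converted --is_small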
| 206 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
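# Hedged usage sketch: the defaults above (hidden_size=4096, 32 layers) are at
# roughly the rwkv-4-7b-pile scale; smaller values keep a local experiment cheap.
# from transformers import RwkvConfig, RwkvModel
#
# config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
# model = RwkvModel(config)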
| 716 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_( )-> Union[str, Any]:
UpperCamelCase__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
UpperCamelCase__ = np.load(_A )
return list(_A )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
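
# A minimal, self-contained sketch (not part of the original test file) of the
# bool_masked_pos construction used above: one shared mask, expanded over the batch.
# The sizes below are hypothetical small values chosen for illustration.
#
#     import torch
#
#     seq_length, num_masks, batch_size = 8, 6, 2
#     mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#     bool_masked_pos = mask.expand(batch_size, -1).bool()  # (batch_size, seq_length); True = masked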
| 185
| 0
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law on any two given electrical values (voltage, current, resistance)
    and return the name/value pair of the zero value in a Python dict.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
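
# Quick illustrative checks (not part of the original file): the function solves for
# whichever quantity is passed as 0.
assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}
assert ohms_law(voltage=0, current=2, resistance=3) == {"voltage": 6.0}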
| 632
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
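
# Hedged usage sketch (not part of the original script); the script filename and the
# output path here are hypothetical:
#     python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit-50 --push_to_hub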
| 632
| 1
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two input arrays combined."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 90
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
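
# Hedged sketch (not part of the original tests) of the half-precision loading pattern
# exercised above; the model id is the one the tests use, everything else is illustrative:
#     vae = AutoencoderKL.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="vae",
#         torch_dtype=torch.float16, revision="fp16",
#     )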
| 90
| 1
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
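
# Illustrative sketch (not part of the original tests): ImageGPT maps each pixel to the
# id of its nearest color cluster, so `input_ids` are palette indices rather than text
# tokens. The two-color palette below is hypothetical.
_clusters = np.asarray([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
_pixel = np.asarray([0.9, 0.8, 1.0])
assert int(((_clusters - _pixel) ** 2).sum(axis=-1).argmin()) == 1  # nearest cluster id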
| 287
|
"""Greedy APX algorithm for the minimum vertex cover problem."""
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71
| 0
|
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
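
# Quick illustrative check (not part of the original file): each pass flips the current
# maximum to the front, then flips it into its final position.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]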
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
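
# Hedged usage note (not part of the module): with the _LazyModule registered above,
# heavy submodules are only imported on first attribute access, e.g.
#     from transformers.models.wav2vec2 import Wav2Vec2Config  # triggers the lazy import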
| 279
| 0
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the prime numbers strictly below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p^q * q^p (p, q distinct primes) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
    print(f"{solution() = }")
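
# Quick illustrative check (not part of the original file): the sieve helper returns
# the primes strictly below its bound.
assert calculate_prime_numbers(10) == [2, 3, 5, 7]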
| 558
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
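
# Hedged usage sketch (not part of this module): simulating a dropped connection in a test.
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         with pytest.raises(requests.ConnectionError):
#             requests.Session().request("GET", "https://huggingface.co")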
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    # Returns the numerical id of the current pytest-xdist worker ("gw0" -> 0),
    # or 0 when pytest-xdist isn't being used.
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def pytest_xdist_port():
    # Returns a port unique to this particular pytest-xdist worker.
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 558
| 1
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Compute num! recursively; lru_cache memoizes the intermediate results."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
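
# Quick illustrative check (not part of the original file): thanks to lru_cache,
# computing factorial(10) after factorial(5) reuses the five cached sub-results.
assert factorial(5) == 120
assert factorial(10) == 3628800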
| 487
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : str = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_)
__UpperCAmelCase : Any = "This is a test string"
__UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_)
__UpperCAmelCase : Dict = tokenizer(UpperCamelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = self.get_feature_extractor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : List[str] = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_)
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : int = processor.batch_decode(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_feature_extractor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : int = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_)
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
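# A minimal usage sketch of the processor under test (illustrative; assumes access to
# the "laion/clap-htsat-unfused" checkpoint referenced above):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text="a dog barking", audios=floats_list((1, 1000)), return_tensors="np")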
| 487
| 1
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
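# Minimal usage sketch (illustrative): the decoder side can be sized independently of
# the BERT encoder, e.g. BertAbsConfig(dec_layers=6, dec_hidden_size=768, dec_ff_size=2048).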
| 562
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
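# Note (illustrative): with this lazy-module pattern, importing the package stays cheap;
# heavy submodules such as modeling_squeezebert are only imported on first attribute access.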
| 562
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
UpperCamelCase_ = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
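# Sketch of the special-token layout produced above (illustrative ids): for
# token_ids_0=[5, 6] and token_ids_1=[7], build_inputs_with_special_tokens yields
# [CLS] 5 6 [SEP] 7 [SEP], and create_token_type_ids_from_sequences yields [0, 0, 0, 0, 1, 1].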
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
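# Typical invocations (illustrative): `accelerate test` from the CLI entry point, or
# `accelerate test --config_file path/to/config.yaml` to validate a specific config.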
| 561
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
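# Illustrative arithmetic: with the defaults above, output_stride=32 on a 224x224 input
# yields a 7x7 final feature map (224 / 32 = 7), and depth_divisible_by=8 rounds channel
# counts to multiples of 8 after applying depth_multiplier.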
| 468
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to pick a default processing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
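# Illustrative runs (from the repo root, matching the header comment above):
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>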
| 122
| 0
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length")
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True)
                input_ids = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(input_ids))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(input_ids)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
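# Note (illustrative): the leading id 2 in the expected encodings above is OPT's default
# beginning-of-sequence token, which the tokenizer prepends to every input.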
| 699
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
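# Example session (illustrative):
#   s = Stack[int]()
#   s.push(1); s.push(2)
#   assert s.pop() == 2 and s.peek() == 1 and len(s) == 1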
| 699
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
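# Illustrative arithmetic: with image_size=224 and patch_size=16 there are
# (224 // 16) ** 2 = 196 patches, so mask_ratio=0.75 masks roughly 147 of them
# during MAE pre-training.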
| 207
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center_pixel)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 207
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature: encode/decode image data as a struct of {bytes, path}."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's native format when Pillow can both open and save it."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
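# Round-trip sketch (illustrative; assumes Pillow and a hypothetical local file "cat.png"):
#   feature = Image()
#   encoded = feature.encode_example("cat.png")   # -> {"path": "cat.png", "bytes": None}
#   pil_image = feature.decode_example(encoded)   # -> PIL.Image.Image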
| 702
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
a_ = input("""Enter a string """).strip()
a_ = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 286
| 0
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 17
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
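#
# A quick sanity check of how the dimensions expand (hedged sketch, stdlib only):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   print([" ".join(v) for v in itertools.product(*dims)])
#   # -> the same 6 combinations listed above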
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely and wrapped for 80 char width.

    Args:
        max_width (`int`, *optional*, defaults to 80): the width to wrap for.
        full_python_path (`bool`, *optional*, defaults to `False`):
            whether to replicate the full path or just the last segment (i.e. `python`).
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))

    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
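# Worked example of the diff_% column, using the sample table from the header:
# with --tf32 0 as the baseline (285.11 samples/sec), --tf32 1 at 342.09 gives
# round(100 * (342.09 - 285.11) / 285.11) == 20, matching the report above.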
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 246
| 0
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(default=None, metadata={"help": "The name of the task to train on."})
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments for the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."}
    )
    seed: Optional[int] = dataclasses.field(default=None, metadata={"help": "Random seed for initialization."})
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
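# Worked example of the validation-performance filter above: with eval_result == 0.9
# and 1000 rows in `dataset`, rows are sorted by "probability" (descending) and only
# the top int(0.9 * 1000) == 900 are kept as pseudo-labels for the next iteration.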
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training a pre-trained model on a downstream task.

    Args:
      model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models.
      train_file: A csv or a json file containing the training data.
      infer_file: A csv or a json file containing the data to predict on.
      output_dir: The output directory where the model predictions and checkpoints will be written.
      **kwargs: Additional key/value pairs with which to update the argument namespace after loading.
    """
    # Initialize the accelerator. We will let the accelerator handle device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 502
|
'''simple docstring'''
import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 502
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
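# Hedged usage sketch (not part of the original file): the deprecated negative
# flags are rewritten into their positive counterparts on construction, e.g.
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_speed=True)
#   # warns that `no_speed` is deprecated and sets args.speed = False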
| 91
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
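# Note (added): `_LazyModule` installs itself in `sys.modules`, so the torch-backed
# classes listed in `_import_structure` are only imported on first attribute access,
# while the `TYPE_CHECKING` branch keeps eager imports visible to static type checkers.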
| 588
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 302
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, rng, sample, sigma):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(rng, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
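# Note on set_timesteps above: each schedule entry is
#   sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1)),
# a geometric interpolation equal to sigma_max**2 at i == 0 and sigma_min**2 at
# i == num_inference_steps - 1; with the defaults (sigma_max=100, sigma_min=0.02)
# the values span 1e4 down to 4e-4.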
| 302
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 276
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
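# Worked example: at the default 16 kHz sample rate, max_length=20.0 gives
# sample_length = int(round(16000 * 20.0)) = 320000; clips with at most 320000
# samples are returned unchanged, longer ones yield a random 320000-sample window.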
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
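    # Illustrative only: with logits np.array([[0.1, 0.9], [0.8, 0.2]]) and
    # label_ids np.array([1, 0]), np.argmax gives [1, 0] and accuracy is 1.0.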
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 276
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
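# Illustrative only (hypothetical usage): inside a function that renamed a
# keyword argument, one might write
#   new_value = deprecate("old_kwarg", "1.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
# which pops `old_kwarg` from `kwargs`, emits a FutureWarning, and returns the value.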
| 710
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
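# Illustrative only: `evaluate` interprets a restricted subset of Python, e.g.
#   state = {}
#   evaluate("x = 3", {}, state=state)  # returns 3 and sets state == {"x": 3}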
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 355
| 0
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 122
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
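# Illustrative only: a docstring line such as
#   "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
# yields the match ("bert-base-uncased", "https://huggingface.co/bert-base-uncased").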
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 83
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a TimeSformer model."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
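# Illustrative only: override any of the defaults at construction time, e.g.
#   config = TimesformerConfig(num_frames=16, attention_type="joint_space_time")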
| 710
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
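# Illustrative only (hypothetical usage): the two helpers above are meant to be
# chained, e.g.
#   _create_iam_role_for_sagemaker("accelerate_sagemaker_execution_role")
#   role_arn = _get_iam_role_arn("accelerate_sagemaker_execution_role")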
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 435
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
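# Illustrative only (assumed format): each line of `ref_file` is a JSON list of
# sub-token positions that continue a whole word, e.g. a line "[2, 3]" marks
# tokens 2 and 3 of that example as word continuations.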
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 242
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        """We initialize the class by listing all the documents to summarize.
        Files are not read in memory due to the size of some datasets (like CNN/DailyMail).
        """
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a story file."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
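# Illustrative only: a CNN/DailyMail story file mixes article text and
# "@highlight" summary bullets, e.g.
#   process_story("First sentence.\nSecond.\n@highlight\nA summary.")
#   # -> (["First sentence.", "Second."], ["A summary."])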
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the source and target sequences' lengths to the block size."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
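# Illustrative only:
#   build_mask(torch.tensor([101, 7592, 0, 0]), pad_token_id=0)
#   # -> tensor([1, 1, 0, 0])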
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings that alternate between 0 and 1 for successive sentences."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 242
| 1
|
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 705
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """
    Feature extractor that computes log mel filterbank (MFSC) features from raw speech.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 65
| 0
|
"""simple docstring"""
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 564
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class snake_case_ ( unittest.TestCase ):
A_ = 99
def UpperCAmelCase__ ( self : Optional[Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : str = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase : Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase : Optional[Any] = input_ids.shape[0]
__lowerCAmelCase : str = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 504
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
                 depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
                 downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
                 use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 702
|
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
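    # Hedged usage demo (added for illustration; names as repaired above):
    # can some subset of the list sum to the target?
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))  # True  (4 + 5)
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False (no subset reaches 30)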
| 597
| 0
|
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
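

# Hedged usage sketch (added for illustration): decorating a function makes every call
# emit a UserWarning before running the original body.
if __name__ == "__main__":

    @experimental
    def ping() -> str:
        return "pong"

    print(ping())  # warns "'ping' is experimental ...", then prints "pong"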
| 58
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
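

# Hedged sanity check for the entropy() helper above (added for illustration, not part of
# the original DeeBERT sources): uniform logits give entropy log(n); peaked logits give ~0.
if __name__ == "__main__":
    uniform_logits = torch.zeros(1, 4)  # softmax -> [0.25, 0.25, 0.25, 0.25]
    peaked_logits = torch.tensor([[100.0, 0.0, 0.0, 0.0]])
    print(entropy(uniform_logits))  # tensor([1.3863]) ~= log(4)
    print(entropy(peaked_logits))  # ~tensor([0.])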
| 58
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 557
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 557
| 1
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
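    # Hedged demo (added for illustration; names as repaired above): every returned
    # line is padded to exactly max_width characters.
    for justified_line in text_justification("This is an example of text justification.", 16):
        print(repr(justified_line))
    # -> 'This    is    an', 'example  of text', 'justification.  '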
| 4
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
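

# Hedged usage sketch (added for illustration; requires network access to the Hub):
# a single sequence is finalized by simply appending the EOS id.
if __name__ == "__main__":
    tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    print(tok.build_inputs_with_special_tokens([5, 6, 7]))  # -> [5, 6, 7, tok.eos_token_id]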
| 711
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 577
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the `default` subcommand on the given parser."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location", )
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for the `default` subcommand."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 215
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize a PIL image down to a multiple of 32 and normalize it into a [-1, 1] NCHW tensor."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
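# Quick sanity sketch for `preprocess`: a 37x41 RGB image is snapped down to 32x32
# (the nearest multiples of 32) and mapped into a [-1, 1] NCHW float tensor.
#
#   img = PIL.Image.new("RGB", (37, 41))
#   t = preprocess(img)
#   assert t.shape == (1, 3, 32, 32) and t.min() >= -1.0 and t.max() <= 1.0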
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline built from a VQ-VAE, a UNet and a scheduler."""
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ], ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image] = None, batch_size: Optional[int] = 1, num_inference_steps: Optional[int] = 100, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
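# Hedged usage sketch (the checkpoint id is an assumption for illustration, not taken
# from this file):
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]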
| 215
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77  # the original assigned a bare 77 here; attaching it to the tokenizer is an assumption
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 405
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
__lowerCAmelCase ="▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
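# Layout sketch for the two helpers above: for a pair of sequences the tokenizer
# produces [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1
# over the second (ids below are illustrative):
#
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok.build_inputs_with_special_tokens([7], [8])        # -> [cls, 7, sep, 8, sep]
#   tok.create_token_type_ids_from_sequences([7], [8])    # -> [0, 0, 0, 1, 1]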
| 405
| 1
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE MRPC train/eval dataloaders with dynamic padding."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
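# Launch sketch (the script filename is an assumption; any of the settings listed in
# the header comment work with the same command):
#
#   accelerate launch nlp_example.py
#   accelerate launch nlp_example.py --mixed_precision fp16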
| 73
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : str = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs, ):
        """Configuration for the Time Series Transformer model."""
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
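# Construction sketch: with one static categorical feature of cardinality 10, the
# embedding dimension defaults to min(50, (10 + 1) // 2) = 5 per the fallback above.
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, num_static_categorical_features=1, cardinality=[10])
#   assert config.embedding_dimension == [5]
#   assert config.context_length == 24  # falls back to prediction_length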
| 495
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
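# The modulo assertions above hold because GLPN rounds each spatial side down to a
# multiple of `size_divisor`; the rounding rule in isolation:
#
#   size_divisor = 32
#   height, width = 415, 530
#   assert (height // size_divisor * size_divisor, width // size_divisor * size_divisor) == (384, 512)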
| 530
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
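    # `parse_bool` lets the optional overrides below accept explicit "True"/"False"
    # strings on the command line, e.g.:
    #
    #   parse_bool("True")   # -> True
    #   parse_bool("False")  # -> False
    #   parse_bool("yes")    # -> ValueError, reported by argparse as an invalid value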
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 530
| 1
|
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars; iterate over i, not n, so each row loses one star
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 155
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
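# Worked sketch of the distance helper: for the points (0, 0) and (3, 4),
# euclidean_distance returns sqrt(3**2 + 4**2) = 5.0.
#
#   assert euclidean_distance([0, 0], [3, 4]) == 5.0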
| 155
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Shannon entropy along the last dimension; `unlogit` squares p first."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
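# Sketch: a uniform attention row over 4 tokens has the maximum entropy log(4) ≈ 1.386,
# so low values from `entropy` flag heads that concentrate on few tokens.
#
#   p = torch.full((4,), 0.25)
#   assert torch.isclose(entropy(p), torch.log(torch.tensor(4.0)))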
def print_ad_tensor(tensor):
    """Log a 2D tensor (layers x heads) row by row."""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=False ) -> List[Any]:
A , A = model.config.num_hidden_layers, model.config.num_attention_heads
A = torch.zeros(lowerCamelCase__ , lowerCamelCase__ ).to(args.device )
A = torch.zeros(lowerCamelCase__ , lowerCamelCase__ ).to(args.device )
if head_mask is None:
A = torch.ones(lowerCamelCase__ , lowerCamelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCamelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
A = None
A = 0.0
A = 0.0
for step, inputs in enumerate(tqdm(lowerCamelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
A = tuple(t.to(args.device ) for t in inputs )
((A) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
A = model(lowerCamelCase__ , labels=lowerCamelCase__ , head_mask=lowerCamelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
A , A , A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCamelCase__ ):
A = entropy(attn.detach() , lowerCamelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCamelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
A = 2
A = torch.pow(torch.pow(lowerCamelCase__ , lowerCamelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCamelCase__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCamelCase__ )
logger.info('Head ranked by importance scores' )
A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
A = torch.arange(
head_importance.numel() , device=args.device )
A = head_ranks.view_as(lowerCamelCase__ )
print_ad_tensor(lowerCamelCase__ )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
A , A , A = compute_heads_importance(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ )
A = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase__ , original_score * args.masking_threshold )
A = torch.ones_like(lowerCamelCase__ )
A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
A = original_score
while current_score >= original_score * args.masking_threshold:
A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
A = float('Inf' )
A = head_importance.view(-1 ).sort()[1]
if len(lowerCamelCase__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
A = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
A = new_head_mask.view(-1 )
A = 0.0
A = new_head_mask.view_as(lowerCamelCase__ )
A = new_head_mask.clone().detach()
print_ad_tensor(lowerCamelCase__ )
# Compute metric and head importance again
A , A , A = compute_heads_importance(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , head_mask=lowerCamelCase__ )
A = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCamelCase__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
A = datetime.now()
A , A , A = compute_heads_importance(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , compute_importance=lowerCamelCase__ , head_mask=lowerCamelCase__ )
A = 1 / loss
A = datetime.now() - before_time
A = sum(p.numel() for p in model.parameters() )
A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A = [
v,
]
assert sum(len(lowerCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCamelCase__ )
A = sum(p.numel() for p in model.parameters() )
A = datetime.now()
A , A , A = compute_heads_importance(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , compute_entropy=lowerCamelCase__ , compute_importance=lowerCamelCase__ , head_mask=lowerCamelCase__ , actually_pruned=lowerCamelCase__ , )
A = 1 / loss
A = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase__ , lowerCamelCase__ , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase__ , lowerCamelCase__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(lowerCamelCase__ , args.output_dir )
def lowerCAmelCase__ ( ) -> List[str]:
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCamelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCamelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCamelCase__ , type=lowerCamelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCamelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCamelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCamelCase__ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase__ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCamelCase__ , default=42 )
parser.add_argument('--local_rank' , type=lowerCamelCase__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=lowerCamelCase__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCamelCase__ , default='' , help='Can be used for distant debugging.' )
A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
A = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
A = torch.device('cuda' , args.local_rank )
A = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase__ )
torch.save(lowerCamelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCamelCase__ )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
A = mask_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
prune_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 109
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
self.assertIsInstance(processor.qformer_tokenizer , snake_case )
def A_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = self.prepare_image_inputs()
A = image_processor(snake_case , return_tensors='np' )
A = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = processor(text=snake_case )
A = tokenizer(snake_case , return_token_type_ids=snake_case )
A = qformer_tokenizer(snake_case , return_token_type_ids=snake_case )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=snake_case , images=snake_case )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(snake_case )
A = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=snake_case , images=snake_case )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
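# Editorial note on the assertions above (not part of the original test file):
# InstructBlipProcessor merges the outputs of two tokenizers. The main tokenizer
# produces "input_ids"/"attention_mask", while the Q-Former tokenizer's outputs
# are re-keyed with a "qformer_" prefix, which is why the tests compare
# encoded_tokens_qformer[key] against encoded_processor["qformer_" + key].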
| 109
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for Manhattan distance, 0 for Euclidean distance
UpperCAmelCase = 0
UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCAmelCase = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = pos_x
lowercase = pos_y
lowercase = (pos_y, pos_x)
lowercase = goal_x
lowercase = goal_y
lowercase = g_cost
lowercase = parent
lowercase = self.calculate_heuristic()
lowercase = self.g_cost + self.h_cost
def SCREAMING_SNAKE_CASE__ ( self ):
dx = self.pos_x - self.goal_x
dy = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case ):
return self.f_cost < other.f_cost
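# Quick sanity check of the two heuristics above (an editorial sketch, not part
# of the original file): for a node at (y=0, x=0) with goal at (y=3, x=4),
# dx = -4 and dy = -3, so Manhattan gives abs(-4) + abs(-3) = 7 while
# Euclidean gives sqrt(3**2 + 4**2) = 5.0. On a 4-connected grid both are
# admissible, since the true path cost can never be smaller than either.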
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case ):
lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case )
lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , snake_case )
lowercase = [self.start]
lowercase = []
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case )
self.closed_nodes.append(snake_case )
lowercase = self.get_successors(snake_case )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case )
else:
# retrieve the best current path
lowercase = self.open_nodes.pop(self.open_nodes.index(snake_case ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case )
else:
self.open_nodes.append(snake_case )
return [self.start.pos]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = []
for action in delta:
lowercase = parent.pos_x + action[1]
lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case , snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case , ) )
return successors
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = node
lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase = current_node.parent
path.reverse()
return path
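# Editorial note: the class below runs two of the searches above at once, one
# from the start and one from the goal, stopping when the frontiers meet. The
# final path is stitched together in retrace_bidirectional_path: the forward
# half is kept as-is and the backward half is reversed (its duplicate meeting
# node popped first) before concatenation.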
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case ):
lowercase = AStar(snake_case , snake_case )
lowercase = AStar(snake_case , snake_case )
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowercase = self.fwd_astar.open_nodes.pop(0 )
lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case , snake_case )
self.fwd_astar.closed_nodes.append(snake_case )
self.bwd_astar.closed_nodes.append(snake_case )
lowercase = current_bwd_node
lowercase = current_fwd_node
lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case )
else:
# retrieve the best current path
lowercase = astar.open_nodes.pop(
astar.open_nodes.index(snake_case ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case )
else:
astar.open_nodes.append(snake_case )
return [self.fwd_astar.start.pos]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.fwd_astar.retrace_path(snake_case )
lowercase = self.bwd_astar.retrace_path(snake_case )
bwd_path.pop()
bwd_path.reverse()
lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCAmelCase = (0, 0)
UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase = time.time()
UpperCAmelCase = AStar(init, goal)
UpperCAmelCase = a_star.search()
UpperCAmelCase = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
UpperCAmelCase = time.time()
UpperCAmelCase = BidirectionalAStar(init, goal)
UpperCAmelCase = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 84
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def __a ( _UpperCamelCase: Any , _UpperCamelCase: List[Any] , _UpperCamelCase: Union[str, Any]=8 ) -> Optional[int]:
"""simple docstring"""
new_h = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
new_w = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
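# Worked example (an editorial note, not from the original source): with
# h = w = 512 and scale_factor = 8, 512 // 8**2 = 8, so the function returns
# 8 * 8 = 64 per side -- the image resolution divided by the movq scale factor,
# rounded up to the next multiple of the scale factor when the division is not
# exact.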
class _a ( __lowerCAmelCase ):
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE ,tokenizer=_SCREAMING_SNAKE_CASE ,unet=_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,movq=_SCREAMING_SNAKE_CASE ,)
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
if latents is None:
_snake_case = randn_tensor(_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_snake_case = latents.to(_SCREAMING_SNAKE_CASE )
_snake_case = latents * scheduler.init_noise_sigma
return latents
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,) -> Union[str, Any]:
_snake_case = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
_snake_case = self.tokenizer(
_SCREAMING_SNAKE_CASE ,padding="max_length" ,truncation=_SCREAMING_SNAKE_CASE ,max_length=77 ,return_attention_mask=_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
_snake_case = text_inputs.input_ids
_snake_case = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding="longest" ,return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_snake_case = text_input_ids.to(_SCREAMING_SNAKE_CASE )
_snake_case = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
_snake_case = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
_snake_case = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
_snake_case = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
if do_classifier_free_guidance:
_snake_case = 42
if negative_prompt is None:
_snake_case = [""] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="""
f""" {type(_SCREAMING_SNAKE_CASE )}.""" )
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
_snake_case = negative_prompt
_snake_case = self.tokenizer(
_SCREAMING_SNAKE_CASE ,padding="max_length" ,max_length=77 ,truncation=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
_snake_case = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
_snake_case = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_snake_case = negative_prompt_embeds.shape[1]
_snake_case = negative_prompt_embeds.repeat(1 ,_SCREAMING_SNAKE_CASE )
_snake_case = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,_SCREAMING_SNAKE_CASE )
_snake_case = uncond_text_encoder_hidden_states.shape[1]
_snake_case = uncond_text_encoder_hidden_states.repeat(1 ,_SCREAMING_SNAKE_CASE ,1 )
_snake_case = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,_SCREAMING_SNAKE_CASE ,-1 )
_snake_case = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_snake_case = torch.cat([negative_prompt_embeds, prompt_embeds] )
_snake_case = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
_snake_case = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def _lowercase ( self ,_SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_snake_case = torch.device(f"""cuda:{gpu_id}""" )
_snake_case = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_snake_case = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_snake_case = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
_snake_case , _snake_case = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
_snake_case , _snake_case = cpu_offload_with_hook(self.safety_checker ,_SCREAMING_SNAKE_CASE ,prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
_snake_case = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ) -> Optional[int]:
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 512 ,_SCREAMING_SNAKE_CASE = 512 ,_SCREAMING_SNAKE_CASE = 100 ,_SCREAMING_SNAKE_CASE = 4.0 ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = "pil" ,_SCREAMING_SNAKE_CASE = True ,) -> Union[str, Any]:
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = 1
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}""" )
_snake_case = self._execution_device
_snake_case = batch_size * num_images_per_prompt
_snake_case = guidance_scale > 1.0
_snake_case , _snake_case , _snake_case = self._encode_prompt(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = torch.cat(_SCREAMING_SNAKE_CASE ,dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
_snake_case = torch.cat(_SCREAMING_SNAKE_CASE ,dim=0 )
if do_classifier_free_guidance:
_snake_case = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
_snake_case = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
_snake_case = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE )
_snake_case = self.scheduler.timesteps
_snake_case = self.unet.config.in_channels
_snake_case , _snake_case = get_new_h_w(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.movq_scale_factor )
# create initial latent
_snake_case = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
_snake_case = self.unet(
sample=_SCREAMING_SNAKE_CASE ,timestep=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,added_cond_kwargs=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
if do_classifier_free_guidance:
_snake_case , _snake_case = noise_pred.split(latents.shape[1] ,dim=1 )
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case , _snake_case = variance_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_snake_case = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_snake_case , _snake_case = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,).prev_sample
# post-processing
_snake_case = self.movq.decode(_SCREAMING_SNAKE_CASE ,force_not_quantize=_SCREAMING_SNAKE_CASE )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_snake_case = image * 0.5 + 0.5
_snake_case = image.clamp(0 ,1 )
_snake_case = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
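# Editorial note on the guidance step above: with classifier-free guidance the
# batch is doubled (unconditional first, text-conditioned second), and the
# combined prediction is noise_uncond + guidance_scale * (noise_text -
# noise_uncond). For example, with guidance_scale = 4.0 the text-conditioned
# direction is amplified fourfold relative to the unconditional baseline.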
| 185
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 711
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a :List[Any] = None
a :Optional[int] = logging.get_logger(__name__)
a :Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a :Optional[int] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
a :Dict = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
a :int = "▁"
# Segments (not really needed)
a :Dict = 0
a :Optional[int] = 1
a :Tuple = 2
a :List[str] = 3
a :Optional[Any] = 4
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = """left"""
_SCREAMING_SNAKE_CASE :Optional[Any] = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE__ : List[str] = remove_space
SCREAMING_SNAKE_CASE__ : int = keep_accents
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
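# Editorial worked example of the XLNet conventions above (not part of the
# original file): unlike BERT, special tokens go at the end, so a single
# sequence A becomes A </s> <cls> with token type ids len(A + sep) * [0] + [2],
# and a pair (A, B) becomes A </s> B </s> <cls> with types
# len(A + sep) * [0] + len(B + sep) * [1] + [2].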
| 12
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=__SCREAMING_SNAKE_CASE , )
assert hasattr(self , """env""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
def __UpperCamelCase ( self ):
# create estimator
estimator = self.create_estimator()
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __SCREAMING_SNAKE_CASE )
| 38
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
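# Editorial note: all three input routes above (PIL, NumPy, PyTorch) are
# expected to land on the same output contract -- a "pt" tensor of shape
# (batch_size, num_channels, size["height"], size["width"]), here
# (7, 3, 18, 18) batched and (1, 3, 18, 18) for a single image.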
| 38
| 1
|
"""simple docstring"""
import os
def _snake_case ( ) -> str:
with open(os.path.dirname(lowerCamelCase__ ) + "/grid.txt" ) as f:
lowerCamelCase_ : str =[] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase_ : Optional[int] =0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase_ : Dict =l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase_ : Union[str, Any] =temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase_ : Optional[int] =l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase_ : List[str] =temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase_ : List[str] =l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase_ : List[Any] =temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase_ : Any =l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase_ : Optional[int] =temp
return maximum
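# A more compact equivalent of the four scans above (an illustrative sketch,
# not part of the original solution; the helper name is hypothetical): walk
# every cell with four direction vectors (right, down, down-right, down-left)
# and guard the end index instead of hand-tuning each loop range.
def _solution_compact() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = [[int(x) for x in line.split()] for line in f]
    best = 0
    for dy, dx in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                if 0 <= i + 3 * dy < 20 and 0 <= j + 3 * dx < 20:
                    product = 1
                    for k in range(4):
                        product *= grid[i + k * dy][j + k * dx]
                    best = max(best, product)
    return best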
if __name__ == "__main__":
print(solution())
| 244
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__ ( snake_case__, unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : List[str] ):
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Tuple =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
lowerCamelCase_ : Union[str, Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
lowerCamelCase_ : str =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : str ="A red cat sitting on a park bench"
lowerCamelCase_ : Optional[int] =np.random.RandomState(0 )
lowerCamelCase_ : int =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : Tuple =output.images
lowerCamelCase_ : str =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase_ : List[str] =np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : List[Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
lowerCamelCase_ : Union[str, Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
lowerCamelCase_ : Optional[Any] =LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
lowerCamelCase_ : Any =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : List[Any] ="A red cat sitting on a park bench"
lowerCamelCase_ : Tuple =np.random.RandomState(0 )
lowerCamelCase_ : Optional[int] =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : Union[str, Any] =output.images
lowerCamelCase_ : Any =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase_ : str =np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 244
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , a : List[Any] = None )-> None:
"""simple docstring"""
if components is None:
components = []
self.__components = list(components )
def __len__( self : int )-> int:
"""simple docstring"""
return len(self.__components )
def __str__( self : Dict )-> str:
"""simple docstring"""
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self : Optional[int] , a : Dict )-> Vector:
"""simple docstring"""
lowercase__ = len(self )
if size == len(lowerCAmelCase__ ):
lowercase__ = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Optional[int] , a : str )-> Vector:
"""simple docstring"""
lowercase__ = len(self )
if size == len(lowerCAmelCase__ ):
lowercase__ = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Union[str, Any] , a : Union[str, Any] )-> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Optional[Any] , a : Union[str, Any] )-> float:
"""simple docstring"""
...
def __mul__( self : str , a : Tuple )-> float | Vector:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , (float, int) ):
lowercase__ = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
lowercase__ = len(self )
lowercase__ = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception('invalid operand!' )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Vector:
"""simple docstring"""
return Vector(self.__components )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Optional[Any] )-> float:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str , a : List[Any] )-> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
self.__components[pos] = value
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
lowercase__ = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : List[Any] = False )-> float:
"""simple docstring"""
lowercase__ = self * other
lowercase__ = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
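# Editorial worked example for the angle method above: for components (1, 0)
# and (0, 1) the dot product is 0 and both Euclidean lengths are 1.0, so
# acos(0 / 1.0) = pi / 2, or 90.0 when deg=True.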
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return Vector([0] * dimension )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ))
lowercase__ = [0] * dimension
lowercase__ = 1
return Vector(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (isinstance(_SCREAMING_SNAKE_CASE , (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
random.seed(_SCREAMING_SNAKE_CASE )
lowercase__ = [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )]
return Vector(_SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : List[str] , a : str , a : List[str] )-> None:
"""simple docstring"""
lowercase__ = matrix
lowercase__ = w
lowercase__ = h
def __str__( self : Union[str, Any] )-> str:
"""simple docstring"""
ans = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , a : Optional[Any] )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowercase__ = []
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
def __sub__( self : Any , a : Optional[int] )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowercase__ = []
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : Tuple , a : Tuple )-> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : List[Any] , a : Dict )-> Vector:
"""simple docstring"""
...
def __mul__( self : Optional[int] , a : Tuple )-> Vector | Matrix:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
if len(lowerCAmelCase__ ) == self.__width:
lowercase__ = zero_vector(self.__height )
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] * other.component(lowerCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
lowercase__ = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
return None
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
return self.__height
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int:
"""simple docstring"""
return self.__width
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : str )-> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[str] , a : Optional[int] , a : str )-> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception('change_component: indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[str] , a : List[Any] )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
lowercase__ = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase__ ) ):
lowercase__ = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Any , a : Tuple )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise Exception('Indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
]
return sum(lowerCAmelCase__ )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = [[0] * n for _ in range(_SCREAMING_SNAKE_CASE )]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
random.seed(_SCREAMING_SNAKE_CASE )
lowercase__ = [
[random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )
]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
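# Editorial note on the determinant above: a 2x2 matrix [[a, b], [c, d]] is
# handled directly as a*d - b*c, and anything larger recurses via Laplace
# expansion along the first row, summing matrix[0][y] * cofactor(0, y) over
# each column y, where the cofactor carries the (-1) ** (x + y) sign.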
| 235
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = torch.load(lowercase , map_location='''cpu''' )
__lowercase = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
__lowercase = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__lowercase = v
else:
__lowercase = v
__lowercase = chkpt['''params''']
__lowercase = {n: v for n, v in config.items() if not isinstance(lowercase , (torch.FloatTensor, numpy.ndarray) )}
__lowercase = chkpt['''dico_word2id''']
__lowercase = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
__lowercase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__lowercase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__lowercase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(lowercase , lowercase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowercase , indent=2 ) + '''\n''' )
print(F"Save vocab file to {pytorch_config_dump_path}" )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__a : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
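# Editorial note on the vocab rewrite above: entries without a "@@" marker
# (and with index > 13, i.e. past the special tokens) get a "</w>" suffix to
# mark a word-final BPE unit, while "@@" continuation markers are stripped --
# converting a fastBPE-style vocabulary into the word-boundary format the
# XLM tokenizer expects.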
| 534
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_a : str = logging.get_logger(__name__)
_a : str = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
A = '''bart'''
A = ['''past_key_values''']
A = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , UpperCAmelCase=5_0_2_6_5 , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=1_2 , UpperCAmelCase=4_0_9_6 , UpperCAmelCase=1_6 , UpperCAmelCase=1_2 , UpperCAmelCase=4_0_9_6 , UpperCAmelCase=1_6 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase="gelu" , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=3 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=2 , UpperCAmelCase=2 , **UpperCAmelCase , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase ):
__lowerCamelCase = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
@property
def lowerCamelCase_ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCamelCase = {0: """batch"""}
__lowerCamelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """decoder_sequence"""}
__lowerCamelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCamelCase , __lowerCamelCase = self.num_layers
for i in range(UpperCAmelCase ):
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCamelCase_ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = super().outputs
else:
__lowerCamelCase = super(UpperCAmelCase , self ).outputs
if self.use_past:
__lowerCamelCase , __lowerCamelCase = self.num_layers
for i in range(UpperCAmelCase ):
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Generate decoder inputs
__lowerCamelCase = seq_length if not self.use_past else 1
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase = dict(**UpperCAmelCase , **UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCamelCase , __lowerCamelCase = common_inputs["""input_ids"""].shape
__lowerCamelCase = common_inputs["""decoder_input_ids"""].shape[1]
__lowerCamelCase , __lowerCamelCase = self.num_attention_heads
__lowerCamelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase = decoder_seq_length + 3
__lowerCamelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 )
__lowerCamelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCamelCase , __lowerCamelCase = self.num_layers
__lowerCamelCase = min(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase = max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers
__lowerCamelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
) )
# TODO: test this.
__lowerCamelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCAmelCase , UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) )
return common_inputs
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCamelCase , __lowerCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCamelCase = seqlen + 2
__lowerCamelCase , __lowerCamelCase = self.num_layers
__lowerCamelCase , __lowerCamelCase = self.num_attention_heads
__lowerCamelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase = common_inputs["""attention_mask"""].dtype
__lowerCamelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
__lowerCamelCase = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
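    # Hedged illustration (not part of the original file): in the seq2seq case above,
    # each shared layer contributes a (dec_key, dec_value, enc_key, enc_value) tuple of
    # zero tensors, e.g. with batch=2, heads=4, head_dim=16:
    #
    #   encoder_shape = (2, 4, encoder_seq_length, 16)
    #   decoder_shape = (2, 4, decoder_seq_length + 3, 16)
    #   past_key_values = [
    #       (torch.zeros(decoder_shape), torch.zeros(decoder_shape),
    #        torch.zeros(encoder_shape), torch.zeros(encoder_shape))
    #       for _ in range(min_num_layers)
    #   ]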
| 710
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
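# Hedged sanity check for the renaming chain above (the sample key is illustrative):
assert (
    rename_key("visual_encoder.blocks.0.attn.proj.weight")
    == "vision_model.encoder.layers.0.self_attn.projection.weight"
)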
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.4_5698_8453_8650_5127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')

    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 571
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
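# Hedged note: with the lazy pattern above, importing this module is cheap. The first
# attribute access (e.g. `from transformers.models.timesformer import TimesformerConfig`)
# makes `_LazyModule` import the real submodule listed in `_import_structure`.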
| 374
|
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
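# Worked trace (hedged, added for illustration): scanning left to right, the first ")"
# reduces 4 * 2 -> 8, the next reduces 2 + 3 -> 5, then 8 * 5 -> 40, and finally
# 5 + 40 -> 45.
assert dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))") == 45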
| 374
| 1
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
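# Hedged doctest-style checks, consistent with the implementation above:
assert check_anagrams("Silent", "Listen")
assert check_anagrams("This is a string", "Is this a string")
assert not check_anagrams("There", "Their")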
| 715
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663
| 0
|
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` unit cells can be filled with unit squares
    and tiles of length 2, 3 and 4.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
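# Hedged sanity checks: the recurrence counts compositions of `length` into parts of
# size 1 through 4, i.e. the tetranacci numbers 1, 1, 2, 4, 8, 15, ...
assert solution(4) == 8
assert solution(5) == 15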
| 86
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    for _ in range(len(data)):
        # pick two random indices and swap the elements in place
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 457
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
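    # Hedged sketch of how these pieces compose in a sampling loop (`unet` and `sample`
    # are illustrative stand-ins for a denoising model and its latent, not part of this
    # file; the real pipeline also applies `step_correct` when sigma_prev > 0):
    #
    #   scheduler = KarrasVeScheduler()
    #   scheduler.set_timesteps(50)
    #   for i in range(len(scheduler.schedule)):
    #       sigma = scheduler.schedule[i]
    #       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
    #       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    #       model_output = unet(sample_hat, sigma_hat)
    #       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample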
| 719
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['''image_processor''', '''tokenizer''']
__SCREAMING_SNAKE_CASE : str = '''OwlViTImageProcessor'''
__SCREAMING_SNAKE_CASE : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , SCREAMING_SNAKE_CASE__ , )
__a : List[Any] = kwargs.pop('feature_extractor' )
__a : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]="max_length" , SCREAMING_SNAKE_CASE__ : Optional[int]="np" , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(text[0] , SCREAMING_SNAKE_CASE__ )):
__a : Any = [self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(text[0] , SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = []
# Maximum number of queries across batch
__a : List[str] = max([len(SCREAMING_SNAKE_CASE__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(SCREAMING_SNAKE_CASE__ ) != max_num_queries:
__a : Optional[int] = t + [' '] * (max_num_queries - len(SCREAMING_SNAKE_CASE__ ))
__a : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
encodings.append(SCREAMING_SNAKE_CASE__ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__a : Optional[Any] = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__a : str = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__a : Optional[Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__a : Tuple = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__a : Dict = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__a : Union[str, Any] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__a : Tuple = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__a : Tuple = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__a : Optional[Any] = BatchEncoding()
__a : Optional[Any] = input_ids
__a : List[Any] = attention_mask
if query_images is not None:
__a : str = BatchEncoding()
__a : Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).pixel_values
__a : Tuple = query_pixel_values
if images is not None:
__a : int = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None and images is not None:
__a : Union[str, Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__a : Dict = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return self.image_processor.post_process(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
| 577
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
__magic_name__ : Tuple = self.block_out_channels
__magic_name__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ : str = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ : Tuple = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ : str = FlaxTimestepEmbedding(_a , dtype=self.dtype )
__magic_name__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__magic_name__ : Tuple = self.only_cross_attention
if isinstance(_a , _a ):
__magic_name__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ : List[Any] = []
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = block_out_channels[0]
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ : Optional[int] = output_channel
__magic_name__ : int = block_out_channels[i]
__magic_name__ : List[str] = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__magic_name__ : List[Any] = FlaxDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_a )
for _ in range(self.layers_per_block ):
__magic_name__ : List[Any] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
if not is_final_block:
__magic_name__ : str = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
__magic_name__ : Any = down_blocks
__magic_name__ : Any = controlnet_down_blocks
# mid
__magic_name__ : Optional[int] = block_out_channels[-1]
__magic_name__ : List[str] = FlaxUNetMidBlockaDCrossAttn(
in_channels=_a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__magic_name__ : Optional[int] = nn.Conv(
_a , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):  # name as imported above
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 124
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
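# Hedged usage sketch (illustrative, added for this excerpt):
#
#   config = CanineConfig()
#   assert config.downsampling_rate == 4 and config.num_hash_buckets == 16384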
| 711
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self : List[Any] , _snake_case : List[Any] , _snake_case : Optional[Any]=13 , _snake_case : Optional[int]=7 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : str=True , _snake_case : Union[str, Any]=True , _snake_case : Tuple=99 , _snake_case : int=16 , _snake_case : Union[str, Any]=36 , _snake_case : str=6 , _snake_case : str=6 , _snake_case : Union[str, Any]=6 , _snake_case : Optional[int]=37 , _snake_case : Optional[int]="gelu" , _snake_case : Optional[Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : str=512 , _snake_case : Optional[int]=16 , _snake_case : List[Any]=2 , _snake_case : int=0.0_2 , _snake_case : Optional[Any]=3 , _snake_case : str=4 , _snake_case : Tuple=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_hidden_groups
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase ( self : int , _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = AlbertModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = AlbertForPreTraining(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , sentence_order_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : int , _snake_case : List[str] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = AlbertForMaskedLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : List[Any] , _snake_case : int , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = AlbertForQuestionAnswering(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : Tuple , _snake_case : str , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForTokenClassification(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : Tuple , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = AlbertForMultipleChoice(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[int] = True
def lowerCamelCase ( self : List[str] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : List[str]=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if return_labels:
if model_class in get_values(_snake_case):
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case)
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case)
return inputs_dict
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = AlbertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_snake_case)
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AlbertModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = AlbertModel.from_pretrained('''albert-base-v2''')
UpperCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)[0]
UpperCAmelCase_ = torch.Size((1, 11, 768))
self.assertEqual(output.shape , _snake_case)
UpperCAmelCase_ = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1e-4))
| 169
| 0
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search on a slice of the list; returns -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f'Iterative search: {target} found at positions: {result1}')
        print(f'Recursive search: {target} found at positions: {result2}')
    else:
        print('Not found')
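# Hedged sanity check: with precision = 10, lists this small fall back to the linear
# scan inside both searches, so both return the same index:
#
#   data = [1, 3, 5, 7, 9, 11]
#   assert ite_ternary_search(data, 7) == 3
#   assert rec_ternary_search(0, len(data) - 1, data, 7) == 3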
| 494
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-0_8)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
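    # Hedged shape sketch for the scoring above: with L query tokens of hidden size H and
    # K support [START] tokens, `q[i] @ s_start.T` is [L, K]; summing over K and applying
    # softmax over L yields a length-L distribution over query positions, e.g.:
    #
    #   q_i = torch.randn(7, 768); s_start = torch.randn(4, 768)
    #   p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)  # shape (7,), sums to 1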
| 181
| 0
|
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
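# Hedged usage example: open knight's tours exist on a 5x5 board; each cell of the
# returned board holds its visit order 1..25:
#
#   board = open_knight_tour(5)
#   assert sorted(x for row in board for x in row) == list(range(1, 26))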
| 368
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
_snake_case = size if size is not None else {"shortest_edge": 224}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
_snake_case = do_convert_rgb
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_snake_case = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_snake_case = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_snake_case = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
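# The obfuscated signatures above would not compile as written (every
# parameter is named `lowerCamelCase`), so here is a hedged, self-contained
# sketch of the same convert-RGB -> resize -> center-crop -> rescale ->
# normalize pipeline in plain PIL/NumPy. The mean/std values are the
# standard OpenAI CLIP constants that the imports above refer to.
import numpy as np
from PIL import Image

_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)

def clip_preprocess_sketch(image, size=224):
    image = image.convert("RGB")                          # do_convert_rgb
    w, h = image.size
    scale = size / min(w, h)                              # shortest edge -> size (do_resize)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    w, h = image.size
    left, top = (w - size) // 2, (h - size) // 2          # do_center_crop
    image = image.crop((left, top, left + size, top + size))
    arr = np.asarray(image).astype(np.float32) / 255.0    # do_rescale
    arr = (arr - _CLIP_MEAN) / _CLIP_STD                  # do_normalize
    return arr.transpose(2, 0, 1)                         # ChannelDimension.FIRST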
| 368
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
lowercase__: Any = ['''pixel_values''']
def __init__( self : Any , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**__magic_name__ )
__snake_case : Dict = size if size is not None else {"""shortest_edge""": 2_24}
__snake_case : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
__snake_case : int = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__snake_case : Optional[Any] = get_size_dict(__magic_name__ , param_name="""crop_size""" )
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : str = resample
__snake_case : Any = do_center_crop
__snake_case : Optional[Any] = crop_size
__snake_case : Optional[Any] = do_rescale
__snake_case : Optional[int] = rescale_factor
__snake_case : Tuple = do_normalize
__snake_case : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__snake_case : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase__ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ) -> np.ndarray:
"""simple docstring"""
__snake_case : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__snake_case : List[Any] = int((2_56 / 2_24) * size["""shortest_edge"""] )
__snake_case : int = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
__snake_case : List[str] = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
__magic_name__ , size=(size_dict["""height"""], size_dict["""width"""]) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ) -> np.ndarray:
"""simple docstring"""
__snake_case : str = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> np.ndarray:
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, Iterable[float]]] = None , __magic_name__ : Optional[Union[float, Iterable[float]]] = None , __magic_name__ : Optional[TensorType] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : str , ) -> BatchFeature:
"""simple docstring"""
__snake_case : str = do_resize if do_resize is not None else self.do_resize
__snake_case : List[str] = resample if resample is not None else self.resample
__snake_case : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
__snake_case : Optional[Any] = image_std if image_std is not None else self.image_std
__snake_case : Optional[int] = size if size is not None else self.size
__snake_case : Optional[int] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
__snake_case : Tuple = crop_size if crop_size is not None else self.crop_size
__snake_case : Tuple = get_size_dict(__magic_name__ , param_name="""crop_size""" )
__snake_case : int = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__snake_case : Optional[int] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
__snake_case : Any = [self.resize(__magic_name__ , __magic_name__ , __magic_name__ ) for image in images]
if do_center_crop:
__snake_case : Any = [self.center_crop(__magic_name__ , __magic_name__ ) for image in images]
if do_rescale:
__snake_case : Any = [self.rescale(__magic_name__ , __magic_name__ ) for image in images]
if do_normalize:
__snake_case : List[Any] = [self.normalize(__magic_name__ , __magic_name__ , __magic_name__ ) for image in images]
__snake_case : List[str] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
__snake_case : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
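# Note on the resize rule above: with the default crop of 224, the shortest
# edge is first scaled to int((256 / 224) * 224) == 256 -- the classic
# ImageNet evaluation recipe (resize the short side to 256, then take a
# 224x224 center crop). A minimal arithmetic check:
def _shortest_edge_sketch(crop=224):
    return int((256 / 224) * crop)  # 224 -> 256

assert _shortest_edge_sketch() == 256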
| 26
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = (EulerDiscreteScheduler,)
_lowercase : List[str] = 10
def _lowercase ( self , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
        config = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase__ )
return config
def _lowercase ( self ) -> Dict:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =self.scheduler_classes[0]
a__ : List[Any] =self.get_scheduler_config()
a__ : Optional[Any] =scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple =torch.manual_seed(0 )
a__ : List[Any] =self.dummy_model()
a__ : Any =self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Optional[int] =sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Union[str, Any] =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Any =model(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
a__ : int =output.prev_sample
a__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase__ ) )
a__ : int =torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] =self.scheduler_classes[0]
a__ : Dict =self.get_scheduler_config(prediction_type="v_prediction" )
a__ : str =scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple =torch.manual_seed(0 )
a__ : Dict =self.dummy_model()
a__ : List[str] =self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Tuple =sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : List[str] =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Any =model(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
a__ : Optional[Any] =output.prev_sample
a__ : Optional[Any] =torch.sum(torch.abs(lowerCAmelCase__ ) )
a__ : List[Any] =torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[Any] =self.scheduler_classes[0]
a__ : Dict =self.get_scheduler_config()
a__ : str =scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
a__ : Tuple =torch.manual_seed(0 )
a__ : Any =self.dummy_model()
a__ : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ : Optional[Any] =sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
a__ : Dict =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =model(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : str =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
a__ : Union[str, Any] =output.prev_sample
a__ : List[str] =torch.sum(torch.abs(lowerCAmelCase__ ) )
a__ : Union[str, Any] =torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Any =self.scheduler_classes[0]
a__ : List[Any] =self.get_scheduler_config()
a__ : List[Any] =scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
a__ : int =torch.manual_seed(0 )
a__ : Dict =self.dummy_model()
a__ : int =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a__ : str =sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
a__ : Tuple =scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =model(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict =scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
a__ : Tuple =output.prev_sample
a__ : int =torch.sum(torch.abs(lowerCAmelCase__ ) )
a__ : Any =torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
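# Hedged sketch of the sampling loop the tests above exercise. The zeros
# tensor stands in for a trained UNet's noise prediction; everything else
# uses the public EulerDiscreteScheduler API (set_timesteps,
# scale_model_input, step) imported at the top of this file.
def _euler_sampling_sketch():
    scheduler = EulerDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample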
| 563
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
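# Hedged sketch of the lazy-import pattern used above: attributes are
# resolved on first access, so importing the package stays cheap. The
# class and dict names below are illustrative, not the transformers
# implementation of _LazyModule.
import importlib
from types import ModuleType

class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)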
| 703
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( __lowerCAmelCase ):
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = Rectangle(height=0.5 , width=0.5 )
a__ = Rectangle(height=0.25 , width=0.25 )
a__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("CPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("GPU" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Model" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
rect.set_stroke(lowerCamelCase )
a__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase , buff=0.0 )
self.add(lowerCamelCase )
model_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase , *lowerCamelCase )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Loaded Checkpoint" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase )
a__ = []
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = fill.copy().set_fill(lowerCamelCase , opacity=0.7 )
target.move_to(lowerCamelCase )
ckpt_arr.append(lowerCamelCase )
a__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase , lowerCamelCase )
a__ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase )
a__ = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
a__ = Text("Disk" , font_size=2_4 )
a__ = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) , Write(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) )
a__ = []
for i, rect in enumerate(lowerCamelCase ):
a__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(FadeOut(lowerCamelCase ) )
a__ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase , lowerCamelCase , *lowerCamelCase , *lowerCamelCase ) , )
self.wait()
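# Hedged usage note: a Manim scene class like the one above is rendered
# from the command line, e.g.
#
#   manim -pql checkpoint_walkthrough.py UpperCamelCase__
#
# where -p previews the result and -ql renders at low quality; the file
# name is illustrative, and the scene name matches the obfuscated class
# defined above.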
| 289
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=24 , snake_case=2 , snake_case=6 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=None , snake_case=1000 , ) -> Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Union[str, Any]:
_UpperCAmelCase = LiltModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
_UpperCAmelCase = model(snake_case , bbox=snake_case , token_type_ids=snake_case )
_UpperCAmelCase = model(snake_case , bbox=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(
snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Tuple:
_UpperCAmelCase = LiltForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(
snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_UpperCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]:
return True
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def lowerCamelCase_ ( self ) -> Tuple:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(snake_case )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=snake_case )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=snake_case , bbox=snake_case )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case , atol=1E-3 ) )
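# Hedged usage sketch mirroring the integration test above: LiLT consumes
# token ids plus one (x0, y0, x1, y1) bounding box per token.
def _lilt_inference_sketch():
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").eval()
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    return outputs.last_hidden_state.shape  # torch.Size([1, 2, 768]) per the test above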
| 573
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be trained.'''} )
_UpperCAmelCase = field(
default='''./''', metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''', metadata={'''help''': '''Name or path of training dataset.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''', metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size for training.'''} )
_UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size for evaluation.'''} )
_UpperCAmelCase = field(default=0.1, metadata={'''help''': '''Value of weight decay.'''} )
_UpperCAmelCase = field(
default=1_00_00, metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    _UpperCAmelCase = field(default=2E-4, metadata={'''help''': '''Learning rate for training.'''} )
    _UpperCAmelCase = field(default='''cosine''', metadata={'''help''': '''Learning rate scheduler type.'''} )
_UpperCAmelCase = field(
default=7_50, metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
_UpperCAmelCase = field(
default=16, metadata={'''help''': '''Number of gradient accumulation steps.'''} )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
_UpperCAmelCase = field(default=5_00_00, metadata={'''help''': '''Maximum number of training steps.'''} )
_UpperCAmelCase = field(
default=-1, metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase = field(default=10_24, metadata={'''help''': '''Sequence lengths used for training.'''} )
_UpperCAmelCase = field(default=1, metadata={'''help''': '''Training seed.'''} )
_UpperCAmelCase = field(
default=10_24, metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''', metadata={'''help''': '''Name or path of validation dataset.'''} )
_UpperCAmelCase = field(default=2, metadata={'''help''': '''Batch size used for evaluation.'''} )
_UpperCAmelCase = field(
default=-1, metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_UpperCAmelCase = field(default=10_24, metadata={'''help''': '''Length of sequences to be evaluated.'''} )
_UpperCAmelCase = field(default=1, metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''Number of workers used for code evaluation.'''} )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
_UpperCAmelCase = field(default=0.2, metadata={'''help''': '''Sampling temperature used for generation.'''} )
_UpperCAmelCase = field(default=2_56, metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
_UpperCAmelCase = field(default=0, metadata={'''help''': '''Top-k parameter used for generation.'''} )
_UpperCAmelCase = field(default=0.95, metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
_UpperCAmelCase = field(default=10, metadata={'''help''': '''Number of generations to run in parallel.'''} )
_UpperCAmelCase = field(
default=2_00, metadata={'''help''': '''Number of completions to generate for each sample.'''} )
_UpperCAmelCase = field(default=1, metadata={'''help''': '''Random seed used for evaluation.'''} )
_UpperCAmelCase = field(
        default='''eval_results.json''', metadata={'''help''': '''Name of the file to save evaluation results to.'''} )
_UpperCAmelCase = field(
default='''0''', metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
_UpperCAmelCase = field(
default=-1, metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
}, )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default=A, metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
}, )
_UpperCAmelCase = field(
default='''transformersbook/codeparrot''', metadata={'''help''': '''Folder or name of dataset to process.'''} )
_UpperCAmelCase = field(
        default='''codeparrot-clean''', metadata={'''help''': '''Folder to save the processed dataset.'''} )
_UpperCAmelCase = field(
default=10_00_00, metadata={'''help''': '''Number of files to save per JSON output file.'''} )
_UpperCAmelCase = field(default='''content''', metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase = field(
default=10_00, metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase = field(
default=1_00, metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
_UpperCAmelCase = field(
default=0.25, metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
_UpperCAmelCase = field(
default=1.5, metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
_UpperCAmelCase = field(
default=0.7, metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Name or path to the tokenizer.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
_UpperCAmelCase = field(
default=0.85, metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''gpt2''', metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
_UpperCAmelCase = field(
default='''transformersbook/codeparrot-train''', metadata={'''help''': '''Dataset to train tokenizer on.'''} )
_UpperCAmelCase = field(default='''content''', metadata={'''help''': '''Column containing text data to process.'''} )
_UpperCAmelCase = field(default=20_00_00, metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
_UpperCAmelCase = field(
        default=3_27_68, metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
_UpperCAmelCase = field(default='''codeparrot''', metadata={'''help''': '''Name of new tokenizer.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Name or path to the tokenizer.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''', metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
_UpperCAmelCase = field(
default='''tokenized-codeparrot-train''', metadata={'''help''': '''Repo name of the pretokenized data.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default='''gpt2-large''', metadata={'''help''': '''Configuration to use for model initialization.'''} )
_UpperCAmelCase = field(
default='''codeparrot/codeparrot''', metadata={'''help''': '''Tokenizer attached to model.'''} )
_UpperCAmelCase = field(default='''codeparrot''', metadata={'''help''': '''Name of the created model.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
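# Hedged parsing sketch: dataclass groups like the ones above are consumed
# via transformers.HfArgumentParser. Because the obfuscation reuses
# `lowercase__` for every class and `_UpperCAmelCase` for every field, the
# sketch defines its own tiny stand-in dataclass rather than reusing the
# broken definitions.
from transformers import HfArgumentParser

@dataclass
class _SketchArguments:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    learning_rate: float = field(default=2e-4, metadata={"help": "Learning rate for training."})

def _parse_sketch(argv=None):
    parser = HfArgumentParser(_SketchArguments)
    (args,) = parser.parse_args_into_dataclasses(argv)
    return args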
| 573
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : Dict , **_snake_case : int ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
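# Hedged sketch of the deprecation-shim pattern used above: the old class
# name keeps working but emits a warning and otherwise defers entirely to
# its replacement. `OldProcessor` / `NewProcessor` are illustrative names,
# not transformers API.
class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.init_args = (args, kwargs)

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)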
| 701
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = None
A__ : Any = BloomTokenizerFast
A__ : Optional[Any] = BloomTokenizerFast
A__ : List[Any] = True
A__ : int = False
A__ : Any = '''tokenizer_file'''
A__ : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def snake_case_ ( self : Any ):
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self : Dict , **_snake_case : Any ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self : Any ):
        tokenizer = self.get_rust_tokenizer()
        input_text = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        expected_ids = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_ids = tokenizer.batch_encode_plus(input_text )['''input_ids''']
        self.assertListEqual(computed_ids , expected_ids )
        decoded_text = tokenizer.batch_decode(computed_ids )
        self.assertListEqual(decoded_text , input_text )
def snake_case_ ( self : str , _snake_case : int=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowercase : List[Any] = '''This is a simple input'''
__lowercase : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
__lowercase : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(_snake_case , max_length=_snake_case )
tokenizer_r.encode_plus(_snake_case , max_length=_snake_case )
tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
tokenizer_r.encode(_snake_case , max_length=_snake_case )
tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
    def snake_case_ ( self : Optional[int] ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=True )
        sample_data = next(iter(ds ) )['''premise'''] # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
def snake_case_ ( self : List[Any] ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraints. The parent class's test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
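# Hedged round-trip sketch matching the expectations encoded in the tests
# above (the token ids are the expected values from the batch test).
def _bloom_roundtrip_sketch():
    tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    ids = tok("The quick brown fox</s>")["input_ids"]
    assert ids == [2175, 23714, 73173, 144252, 2]
    assert tok.decode(ids) == "The quick brown fox</s>"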
| 284
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowerCAmelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase__ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowerCAmelCase__ = model.state_dict()
lowerCAmelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase__ = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
lowerCAmelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
lowerCAmelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
lowerCAmelCase__ = state_dict['''cls.predictions.decoder.weight''']
lowerCAmelCase__ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[f'cls.predictions.transform.dense.{w}']
lowerCAmelCase__ = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
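# Hedged invocation sketch for the extraction script above (the script
# file name is illustrative; the flags come from the argparse definition
# at the top of this file):
#
#   python extract_distilled_checkpoint.py \
#       --model_type bert \
#       --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#       --vocab_transform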
| 41
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class __UpperCamelCase ( PretrainedConfig ):
    model_type = "van"
    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
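# Hedged usage sketch for the config above (the obfuscated class name is
# kept; the smaller "tiny"-style sizes are illustrative overrides):
def _van_config_sketch():
    config = __UpperCamelCase(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 6, 2])
    assert config.mlp_ratios == [8, 8, 4, 4]  # unspecified fields keep their defaults
    return config.to_dict()  # standard PretrainedConfig serialization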
| 259
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf"""</s_{key}>""" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor
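# Hedged example of the parsing implemented above (`tokenajson` is the
# obfuscated form of Donut's `token2json`): nested <s_key>...</s_key>
# fields become nested dict keys, and <sep/> separates sibling leaf values.
#
#   seq = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.tokenajson(seq)
#   # -> {"menu": {"name": "latte", "price": "4.50"}}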
| 19
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix : Any="" ) -> str:
    """simple docstring"""
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =torch.rand(12 , dtype=torch.floataa ) - 0.5
_lowercase =AgentAudio(lowerCAmelCase )
_lowercase =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase ) )
# Ensure that the file contains the same value as the original tensor
_lowercase , _lowercase =sf.read(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , torch.tensor(lowerCAmelCase ) , atol=1e-4 ) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =torch.rand(12 , dtype=torch.floataa ) - 0.5
_lowercase =get_new_path(suffix='.wav' )
sf.write(lowerCAmelCase , lowerCAmelCase , 16_000 )
_lowercase =AgentAudio(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , lowerCAmelCase )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =torch.randint(0 , 256 , (64, 64, 3) )
_lowercase =AgentImage(lowerCAmelCase )
_lowercase =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_lowercase =Image.open(lowerCAmelCase )
_lowercase =AgentImage(lowerCAmelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_lowercase =Image.open(lowerCAmelCase )
_lowercase =AgentImage(lowerCAmelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase ='Hey!'
_lowercase =AgentText(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , agent_type.to_string() )
self.assertEqual(lowerCAmelCase , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
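# Minimal sketch of the pattern these tests exercise (an illustration, not the
# transformers implementation): an agent type keeps the raw value and exposes a
# string form, which for media types would be a path to a serialized temp file.
class SketchAgentText:
    """Hypothetical stand-in for AgentText; raw and string forms coincide."""

    def __init__(self, value: str):
        self._value = value

    def to_raw(self) -> str:
        return self._value

    def to_string(self) -> str:
        return str(self._value)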
| 291
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = StableDiffusionDiffEditPipeline
_a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
_a = frozenset(
[] ) # TODO: update image_params once the pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase , )
_lowercase =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
_lowercase =DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_zero=lowerCAmelCase , )
torch.manual_seed(0 )
_lowercase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowercase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_lowercase =CLIPTextModel(lowerCAmelCase )
_lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowercase ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Tuple:
'''simple docstring'''
_lowercase =floats_tensor((1, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def A__ ( self ) -> Any:
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
_lowercase =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe_loaded(**lowerCAmelCase )[0]
_lowercase =np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase , 1e-4 )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_mask_inputs(lowerCAmelCase )
_lowercase =pipe.generate_mask(**lowerCAmelCase )
_lowercase =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_lowercase =np.array([0] * 9 )
_lowercase =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase ={'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
_lowercase =DPMSolverMultistepScheduler(**lowerCAmelCase )
_lowercase =DPMSolverMultistepInverseScheduler(**lowerCAmelCase )
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A__ ( cls ) -> int:
'''simple docstring'''
_lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_lowercase =raw_image.convert('RGB' ).resize((768, 768) )
_lowercase =raw_image
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase =DDIMScheduler.from_config(pipe.scheduler.config )
_lowercase =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowercase =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase , num_inference_steps=25 , ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
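# Recap of the DiffEdit flow exercised by the two slow tests above, as a usage
# sketch that only repeats calls already made there:
#   mask = pipe.generate_mask(image=raw_image, source_prompt=src, target_prompt=tgt)
#   latents = pipe.invert(prompt=src, image=raw_image, inpaint_strength=0.7).latents
#   edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]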
| 291
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = '''Hello world! cécé herlolip'''
def __lowerCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : bool ) -> List[str]:
lowerCamelCase_ = FairseqRobertaModel.from_pretrained(UpperCAmelCase__ )
roberta.eval() # disable dropout
lowerCamelCase_ = roberta.model.encoder.sentence_encoder
lowerCamelCase_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , UpperCAmelCase__ )
lowerCamelCase_ = XLMRobertaXLForSequenceClassification(UpperCAmelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(UpperCAmelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ = roberta_sent_encoder.embed_tokens.weight
lowerCamelCase_ = roberta_sent_encoder.embed_positions.weight
lowerCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCamelCase_ = roberta_sent_encoder.layer_norm.weight
lowerCamelCase_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ = model.roberta.encoder.layer[i]
lowerCamelCase_ = roberta_sent_encoder.layers[i]
lowerCamelCase_ = layer.attention
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCamelCase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCamelCase_ = roberta_layer.final_layer_norm.weight
lowerCamelCase_ = roberta_layer.final_layer_norm.bias
# intermediate
lowerCamelCase_ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCamelCase_ = roberta_layer.fca.weight
lowerCamelCase_ = roberta_layer.fca.bias
# output
lowerCamelCase_ = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCamelCase_ = roberta_layer.fca.weight
lowerCamelCase_ = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""].dense.weight
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""].dense.bias
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""].out_proj.weight
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ = roberta.encode(UpperCAmelCase__ ).unsqueeze(0 ) # batch of size 1
lowerCamelCase_ = model(UpperCAmelCase__ )[0]
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads["""mnli"""](roberta.extract_features(UpperCAmelCase__ ) )
else:
lowerCamelCase_ = roberta.model(UpperCAmelCase__ )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowerCamelCase_ = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(UpperCAmelCase__ ).mkdir(parents=UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
lowercase = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
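# Example invocation (the script filename and paths are illustrative, not part of
# the source above):
#   python convert_xlm_roberta_xl.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path /path/to/hf_model \
#       --classification_head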
| 721
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A:
def __init__( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Dict=7 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : List[Any]=3_7 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : Any=3 , __UpperCamelCase : int=4 , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Union[str, Any]=0 , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = projection_dim
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = TFDPRContextEncoder(config=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = TFDPRQuestionEncoder(config=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ):
lowerCamelCase_ = TFDPRReader(config=__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __A( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowercase__ ( self : Dict ):
lowerCamelCase_ = TFDPRModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__UpperCamelCase )
def lowercase__ ( self : Dict ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__UpperCamelCase )
@slow
def lowercase__ ( self : Optional[int] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRReader.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class __A( unittest.TestCase ):
@slow
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
lowerCamelCase_ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase_ = model(__UpperCamelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
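# Sketch of how the pooled DPR embeddings checked above are typically consumed
# (an assumption about downstream usage, not part of this test file): retrieval
# scores are dot products between question and passage embeddings.
def dpr_retrieval_scores(question_emb, passage_embs):
    """question_emb: (1, dim); passage_embs: (n, dim) -> (1, n) scores."""
    return tf.matmul(question_emb, passage_embs, transpose_b=True)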
| 103
| 0
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __magic_name__ :
def __init__( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : List[str]=1_3 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Optional[int]=6 , lowerCamelCase__ : Optional[Any]=1_7 , lowerCamelCase__ : str=2_3 , lowerCamelCase__ : Union[str, Any]=1_1 , lowerCamelCase__ : List[str]=True , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = seq_length
lowerCAmelCase : Any = act_dim
lowerCAmelCase : Optional[Any] = state_dim
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Dict = max_length
lowerCAmelCase : Optional[int] = is_training
def _A ( self : Any ):
lowerCAmelCase : Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCAmelCase : Dict = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCAmelCase : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
lowerCAmelCase : Any = random_attention_mask((self.batch_size, self.seq_length) )
lowerCAmelCase : Any = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _A ( self : Optional[Any] ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _A ( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Any , ):
lowerCAmelCase : Any = DecisionTransformerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : int = model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def _A ( self : Dict ):
lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase : Tuple = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case, snake_case, snake_case, unittest.TestCase ):
_lowerCAmelCase = (DecisionTransformerModel,) if is_torch_available() else ()
_lowerCAmelCase = ()
_lowerCAmelCase = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
_lowerCAmelCase = False
# Ignore failing tests from ModelTesterMixin, as the model does not implement these features
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _A ( self : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = DecisionTransformerModelTester(self )
lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 )
def _A ( self : str ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def _A ( self : List[Any] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[Any] = DecisionTransformerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A ( self : str ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(lowerCamelCase__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : Tuple = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def _A ( self : Any ):
lowerCAmelCase : Union[str, Any] = 2 # number of steps of autoregressive prediction we will perform
lowerCAmelCase : Optional[int] = 1_0 # defined by the RL environment, may be normalized
lowerCAmelCase : Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowerCAmelCase : Optional[int] = model.to(lowerCamelCase__ )
lowerCAmelCase : int = model.config
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase__ , dtype=torch.floataa ) # env.reset()
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=lowerCamelCase__ )
lowerCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowerCAmelCase : Any = state
lowerCAmelCase : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase__ , dtype=torch.floataa )
lowerCAmelCase : Any = torch.zeros(1 , 0 , device=lowerCamelCase__ , dtype=torch.floataa )
lowerCAmelCase : Dict = torch.tensor(0 , device=lowerCamelCase__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCamelCase__ ):
lowerCAmelCase : str = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase__ )] , dim=1 )
lowerCAmelCase : Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase__ )] , dim=1 )
lowerCAmelCase : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = model(
states=lowerCamelCase__ , actions=lowerCamelCase__ , rewards=lowerCamelCase__ , returns_to_go=lowerCamelCase__ , timesteps=lowerCamelCase__ , attention_mask=lowerCamelCase__ , return_dict=lowerCamelCase__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase__ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowerCAmelCase : Optional[Any] = action_pred[0, -1]
lowerCAmelCase : Optional[Any] = torch.cat([states, state] , dim=1 )
lowerCAmelCase : Optional[Any] = returns_to_go[0, -1] - reward
lowerCAmelCase : List[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowerCAmelCase : str = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCamelCase__ , dtype=torch.long ) * (step + 1)] , dim=1 )
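# Helper sketch (not from the test above): the `returns_to_go` sequence fed to a
# DecisionTransformer is conventionally the reversed cumulative sum of rewards.
def compute_returns_to_go(rewards):
    """[r0, r1, r2] -> [r0 + r1 + r2, r1 + r2, r2]"""
    out, running = [], 0.0
    for r in reversed(rewards):
        running += r
        out.append(running)
    return out[::-1]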
| 348
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __magic_name__ :
_lowerCAmelCase = LEDConfig
_lowerCAmelCase = {}
_lowerCAmelCase = "gelu"
def __init__( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]=1_3 , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : List[str]=9_9 , lowerCamelCase__ : Any=3_2 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : Optional[int]=3_7 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Any=2_0 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : List[str]=0 , lowerCamelCase__ : List[str]=4 , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Dict = batch_size
lowerCAmelCase : Dict = seq_length
lowerCAmelCase : List[Any] = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : str = vocab_size
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : List[Any] = eos_token_id
lowerCAmelCase : int = pad_token_id
lowerCAmelCase : Union[str, Any] = bos_token_id
lowerCAmelCase : List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase : Optional[int] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _A ( self : Dict ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase : List[str] = prepare_led_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : Tuple = tf.concat(
[tf.zeros_like(lowerCamelCase__ )[:, :-1], tf.ones_like(lowerCamelCase__ )[:, -1:]] , axis=-1 , )
lowerCAmelCase : int = global_attention_mask
return config, inputs_dict
def _A ( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ):
lowerCAmelCase : Optional[int] = TFLEDModel(config=lowerCamelCase__ ).get_decoder()
lowerCAmelCase : List[Any] = inputs_dict['''input_ids''']
lowerCAmelCase : Dict = input_ids[:1, :]
lowerCAmelCase : List[Any] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase : Dict = 1
# first forward pass
lowerCAmelCase : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
lowerCAmelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
lowerCAmelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
lowerCAmelCase : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-3 )
def UpperCAmelCase__ ( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : str=None , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
lowerCAmelCase : Union[str, Any] = tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
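# Usage note for the helper above: when `attention_mask` is omitted it is derived
# by marking every non-pad token, e.g. (mirroring the cast already used above)
#   attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)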
@require_tf
class __magic_name__ ( snake_case, snake_case, unittest.TestCase ):
_lowerCAmelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCAmelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCAmelCase = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _A ( self : Optional[int] ):
lowerCAmelCase : Tuple = TFLEDModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=lowerCamelCase__ )
def _A ( self : str ):
self.config_tester.run_common_tests()
def _A ( self : List[Any] ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _A ( self : List[Any] ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = tf.zeros_like(inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[str] = 2
lowerCAmelCase : int = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
lowerCAmelCase : Tuple = True
lowerCAmelCase : List[str] = self.model_tester.seq_length
lowerCAmelCase : List[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCamelCase__ : Optional[int] ):
lowerCAmelCase : int = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCamelCase__ : Union[str, Any] ):
lowerCAmelCase : List[Any] = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Any = model_class(lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = model(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowerCAmelCase : Any = len(lowerCamelCase__ )
self.assertEqual(config.output_hidden_states , lowerCamelCase__ )
check_encoder_attentions_output(lowerCamelCase__ )
if self.is_encoder_decoder:
lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = model(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase__ )
check_decoder_attentions_output(lowerCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : Dict = model_class(lowerCamelCase__ )
lowerCAmelCase : Dict = model(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase__ )
check_encoder_attentions_output(lowerCamelCase__ )
# Check attention is always last and order is fine
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Optional[Any] = model_class(lowerCamelCase__ )
lowerCAmelCase : Dict = model(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase__ )
check_encoder_attentions_output(lowerCamelCase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def _A ( self : Tuple ):
pass
def _A ( self : Any ):
# TODO: Head-masking not yet implemented
pass
def UpperCAmelCase__ ( __magic_name__ : str ):
'''simple docstring'''
return tf.constant(__magic_name__ , dtype=tf.intaa )
__SCREAMING_SNAKE_CASE : str = 1E-4
@slow
@require_tf
class __magic_name__ ( unittest.TestCase ):
def _A ( self : Optional[Any] ):
lowerCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
lowerCAmelCase : Dict = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : int = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : Tuple = prepare_led_inputs_dict(model.config , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : str = model(**lowerCamelCase__ )[0]
lowerCAmelCase : Dict = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase__ )
# change to expected output here
lowerCAmelCase : Tuple = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase__ , atol=1E-3 )
def _A ( self : Optional[int] ):
lowerCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
lowerCAmelCase : List[Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : List[Any] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase : List[str] = prepare_led_inputs_dict(model.config , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : Optional[int] = model(**lowerCamelCase__ )[0]
lowerCAmelCase : Any = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , lowerCamelCase__ )
# change to expected output here
lowerCAmelCase : str = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase__ , atol=1E-3 , rtol=1E-3 )
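# Sketch of the padding rule encoded in the model tester's `encoder_seq_length`
# above: LED pads the sequence up to the next multiple of the attention window.
def padded_encoder_length(seq_length: int, attention_window: int) -> int:
    return seq_length + (attention_window - seq_length % attention_window) % attention_window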
| 348
| 1
|
import os
import pytest
from attr import dataclass
a_ = """us-east-1""" # defaults region
@dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = 4_2
lowerCAmelCase__ = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
lowerCAmelCase__ = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 1_6,
"""per_device_eval_batch_size""": 1_6,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_0_0,
"""save_steps""": 5_5_0_0,
}
lowerCAmelCase__ = {**hyperparameters, """max_steps""": 1_0_0_0}
@property
def lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def a__ ( _UpperCamelCase : str ):
__lowerCamelCase = SageMakerTestEnvironment(framework=request.cls.framework )
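# Example (hypothetical log line, not emitted by the fixture above) of how the
# metric regexes defined above extract values from SageMaker training logs:
import re as _re  # stdlib only
_line = "eval_accuracy = 0.8423"
_match = _re.search(r"eval_accuracy.*=\D*(.*?)$", _line)
assert _match is not None and float(_match.group(1)) == 0.8423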
| 710
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ = getLogger(__name__)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
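    # Usage for summarization (illustrative paths, mirroring the MT example above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization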
run_generate(verbose=True)
| 622
| 0
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source: float, target: float) -> bool:
    """Return True if `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir: str):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # the remaining fields must match exactly
            assert result == expected
| 18
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
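# `_import_structure` maps each submodule to its public names; `_LazyModule`
# (installed at the bottom of this file) uses it to defer the heavy torch
# imports until one of these attributes is actually accessed.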
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 117
| 0
|
"""simple docstring"""
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence, defined by
    s(1) = 2 and s(n) = s(n-1)**2 - s(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        # (s-1) * s + 1 == s**2 - s + 1
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 713
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question answering /
        # sequence classification heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
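# Minimal usage sketch (illustrative, not part of the original file):
# >>> config = LayoutLMv3Config(input_size=384)
# >>> config.model_type
# 'layoutlmv3'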
| 18
| 0
|
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from all printable characters."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password that is guaranteed to contain `chars_incl`."""
    # Split the remaining length between letters, digits and punctuation.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
| 399
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the set of already-emitted deprecation warnings before each test."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the Hub client so list_metrics does not hit the network."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 399
| 1
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    # Step up (or down with desc=True) until a prime is reached; if the
    # starting value was already prime, recurse to find the next one.
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
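# Example (illustrative): next_prime(14) steps 14 -> 15 -> 16 -> 17 and returns 17,
# while next_prime(7) recurses (7 is already prime) and returns 11.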
| 700
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash:
        # drop the leading character, shift one base, append the next character.
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 386
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 525
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU: x * Φ(x), computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """GELU, the tanh approximation (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with the output clipped to [-10, 10], for quantization-friendly ranges."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two along `axis` and gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys())}")
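# Minimal usage sketch (illustrative):
# >>> act = get_tf_activation("gelu_fast")
# >>> act(tf.constant([0.5, -0.5]))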
| 525
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
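# Example (illustrative): solution(15) == 26, since 2**15 = 32768 and 3+2+7+6+8 = 26.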
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 715
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 593
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Build the sample graph, run Prim's algorithm and check the MST edges."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 593
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
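# Launching (illustrative):
#   accelerate launch tracking_example.py --with_tracking
# The script name here is a placeholder; use the actual file name on disk.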
| 208
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (Shortest Remaining Time First)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
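# Worked example (illustrative): arrival_time = [0, 1], burst_time = [4, 2].
# P2 preempts P1 at t=1 (remaining 2 < 3) and finishes at t=3 with waiting time 0;
# P1 then runs to completion at t=6, so its waiting time is 6 - 0 - 4 = 2.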
| 208
| 1
|
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """idf = log10(n / df), optionally smoothed as 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
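# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran"
    tf = term_frequency("cat", "the cat sat")  # 1 occurrence
    df, n = document_frequency("cat", corpus)  # (1, 2)
    idf = inverse_document_frequency(df, n)  # round(log10(2 / 1), 3) == 0.301
    print(tf_idf(tf, idf))  # 0.301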
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration for a Decision Transformer model (GPT-2 style backbone)."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 5
| 0
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating the input speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to `event`; a symbolic four-vector is used when `event` is None."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("Example of four vector: ")
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 479
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0.0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients = [0.0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
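# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    p = Polynomial(2, [0, 0, 1])  # x^2
    dp = p.derivative()  # 2x
    print(p, "->", dp, "-> dp(3) =", dp.evaluate(3))  # dp(3) == 6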
| 479
| 1
|
'''simple docstring'''
def actual_power(a: int, b: int):
    """Exponentiation by divide and conquer: O(log b) recursion depth. Because
    int(b / 2) truncates toward zero, this returns a**abs(b) for negative b too."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handle negative exponents by inverting the positive-power result."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
| 309
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_A = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1_024, 2_048, 4_096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : int =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case_ , '''do_convert_rgb''' ) )
def __a ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] =self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : List[Any] =2_048
SCREAMING_SNAKE_CASE : List[Any] =image_processor(snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def __a ( self ) -> Any:
# Initialize image_processor
SCREAMING_SNAKE_CASE : Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : int =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE : Any =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE : str =image_processor(
snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ) -> Union[str, Any]:
# Initialize image_processor
SCREAMING_SNAKE_CASE : str =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : int =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE : str =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case_ ):
SCREAMING_SNAKE_CASE : Tuple =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
SCREAMING_SNAKE_CASE : Optional[Any] ='''Hello'''
SCREAMING_SNAKE_CASE : int =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ , header_text=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE : Any =image_processor(
snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ , header_text=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ) -> Any:
# Initialize image_processor
SCREAMING_SNAKE_CASE : int =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
SCREAMING_SNAKE_CASE : Dict =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE : Any =image_processor(
snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ) -> List[str]:
# Initialize image_processor
SCREAMING_SNAKE_CASE : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE : Any =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE : str =image_processor(
snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Dict =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case_ , '''do_convert_rgb''' ) )
def __a ( self ) -> List[str]:
# Initialize image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] =image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] =image_processor(
snake_case_ , return_tensors='''pt''' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
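# Note (added): the flattened-patch width asserted in these tests is
#   patch_height * patch_width * num_channels + 2
# where the extra two columns store each patch's row and column index; in the
# four-channel test above, do_convert_rgb collapses the input back to three
# channels, hence the (num_channels - 1) factor.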
| 258
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    '''simple docstring'''
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter. Used only for testing purposes."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
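# A minimal sketch (added, not part of the original tests): wrapping a single
# linear layer with the adapter defined above.
#
#     base = nn.Linear(16, 16)
#     wrapped = LoRALayer(base, rank=4)
#     out = wrapped(torch.randn(2, 16))  # base output + low-rank adapter output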
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_659_552_692_574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest( Base4bitTest ):
def A__ ( self):
super().setUp()
# Models and tokenizer
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,torch_dtype=torch.floataa ,device_map='''auto''')
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
def A__ ( self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = self.model_abit.config
self.assertTrue(hasattr(A__ ,'''quantization_config'''))
lowercase = config.to_dict()
lowercase = config.to_diff_dict()
lowercase = config.to_json_string()
def A__ ( self):
from bitsandbytes.nn import Paramsabit
lowercase = self.model_fpaa.get_memory_footprint()
lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit ,self.EXPECTED_RELATIVE_DIFFERENCE)
lowercase = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def A__ ( self):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A__ ,torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def A__ ( self):
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
def A__ ( self):
lowercase = BitsAndBytesConfig()
lowercase = True
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
def A__ ( self):
with self.assertRaises(A__), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A__)
def A__ ( self):
lowercase = BitsAndBytesConfig()
with self.assertRaises(A__):
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,quantization_config=A__ ,load_in_abit=A__ ,device_map='''auto''' ,bnb_abit_quant_type='''nf4''' ,)
def A__ ( self):
with self.assertRaises(A__):
# Tries with `str`
self.model_abit.to('''cpu''')
with self.assertRaises(A__):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0'''))
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A__):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
lowercase = self.model_fpaa.to(torch.floataa)
lowercase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
# Check this does not throw an error
lowercase = self.model_fpaa.to('''cpu''')
# Check this does not throw an error
lowercase = self.model_fpaa.half()
# Check this does not throw an error
lowercase = self.model_fpaa.float()
def A__ ( self):
lowercase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' ,load_in_abit=A__ ,device_map='''auto''')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test( unittest.TestCase ):
@classmethod
def A__ ( cls):
lowercase = '''t5-small'''
lowercase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowercase = AutoTokenizer.from_pretrained(cls.model_name)
lowercase = '''Translate in German: Hello, my dog is cute'''
def A__ ( self):
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
from transformers import TaForConditionalGeneration
lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
lowercase = None
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
lowercase = modules
def A__ ( self):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q ,bnb.nn.Linearabit))
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name ,load_in_abit=A__ ,device_map='''auto''')
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''').to(0)
lowercase = model.generate(**A__)
class Classes4BitModelTest( Base4bitTest ):
def A__ ( self):
super().setUp()
# model_name
lowercase = '''bigscience/bloom-560m'''
lowercase = '''t5-small'''
# Different types of model
lowercase = AutoModel.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# Sequence classification model
lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# CausalLM model
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__ ,device_map='''auto''')
# Seq2seq model
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name ,load_in_abit=A__ ,device_map='''auto''')
def A__ ( self):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest( Base4bitTest ):
def A__ ( self):
super().setUp()
def A__ ( self):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = pipeline(
'''text-generation''' ,model=self.model_name ,model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} ,max_new_tokens=self.MAX_NEW_TOKENS ,)
# Real second forward pass
lowercase = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['''generated_text'''] ,self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu( Base4bitTest ):
def A__ ( self):
super().setUp()
def A__ ( self):
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name ,load_in_abit=A__ ,device_map='''balanced''')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) ,{0, 1})
# Check that inference pass works on the model
lowercase = self.tokenizer(self.input_text ,return_tensors='''pt''')
# Second real batch
lowercase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0) ,max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_parallel[0] ,skip_special_tokens=A__) ,self.EXPECTED_OUTPUTS)
class Bnb4bitTestTraining( Base4bitTest ):
def A__ ( self):
lowercase = '''facebook/opt-350m'''
super().setUp()
def A__ ( self):
if version.parse(importlib.metadata.version('''bitsandbytes''')) < version.parse('''0.37.0'''):
return
# Step 1: freeze all parameters
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=A__)
self.assertEqual(set(model.hf_device_map.values()) ,{torch.cuda.current_device()})
for param in model.parameters():
lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowercase = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A__)):
lowercase = LoRALayer(module.q_proj ,rank=1_6)
lowercase = LoRALayer(module.k_proj ,rank=1_6)
lowercase = LoRALayer(module.v_proj ,rank=1_6)
# Step 3: dummy batch
lowercase = self.tokenizer('''Test batch ''' ,return_tensors='''pt''').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowercase = model.forward(**A__)
out.logits.norm().backward()
for module in model.modules():
if isinstance(A__ ,A__):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(A__ ,nn.Embedding):
self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test( Bnb4BitTest ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3_191_854_854_152_187
| 633
|
def and_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    '''simple docstring'''
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
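    # Added: the same outputs, printed as a compact truth table.
    for input_1, input_2 in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(f"{input_1} AND {input_2} = {and_gate(input_1, input_2)}")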
| 633
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 80
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """simple docstring"""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name})
    with open(prompt_file, '''r''', encoding='''utf-8''') as f:
        return f.read()
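# Example sketch (added; fetching a prompt template requires network access,
# and the task string is hypothetical):
#
#     template = download_prompt(None, agent_name="my-agent", mode="run")
#     prompt = template.replace("<<task>>", "Caption the attached image")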
| 404
| 0
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """simple docstring"""
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """simple docstring"""
    url = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """simple docstring"""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("""* [{title}]({url})""".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
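    # Added sketch: the same data as plain dicts, e.g. to grab just the titles:
    #     stories = hackernews_top_stories(max_stories=5)
    #     titles = [story["title"] for story in stories]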
| 520
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
return self.image_proc_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__, """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """size""" ) )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : List[Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, Image.Image )
# Test not batched input
A : List[str] = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
# Test batched
A : Dict = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, np.ndarray )
# Test not batched input
A : Union[str, Any] = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
# Test batched
A : Any = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, torch.Tensor )
# Test not batched input
A : str = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
# Test batched
A : Tuple = image_processor(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
), )
| 520
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : str = torch.device("""cpu""")
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
snake_case__ : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
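    # Example invocation (added; file names and paths are hypothetical):
    #   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth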
| 23
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
snake_case__ : List[str] = TypeVar("""T""")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
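# Quick sanity check (added): in the implicit array heap used below, the
# children of position 0 live at positions 1 and 2, and both map back to it:
#     get_child_left_position(0) == 1
#     get_child_right_position(0) == 2
#     get_parent_position(1) == get_parent_position(2) == 0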
class MinPriorityQueue(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
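# A minimal usage sketch (added, not part of the original module):
#
#     g = GraphUndirectedWeighted[str]()
#     g.add_edge("a", "b", 3)
#     g.add_edge("b", "c", 10)
#     g.add_edge("c", "a", 5)
#     dist, parent = prims_algo(g)
#     # parent now encodes a minimum spanning tree rooted at the first
#     # extracted node, e.g. parent["b"] == "a" and parent["c"] == "a".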
| 23
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
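# Note (added): with the _LazyModule registration above, submodules are only
# imported on first attribute access, e.g.
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# resolves the torch import path lazily instead of at package import time.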
| 713
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')
        requires_backends(self, """vision""")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("""text_queries""")
        if isinstance(image, (str, Image.Image)):
            inputs = {"""image""": image, """candidate_labels""": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["""threshold"""] = kwargs["""threshold"""]
        if "top_k" in kwargs:
            postprocess_params["""top_k"""] = kwargs["""top_k"""]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """simple docstring"""
        image = load_image(inputs["""image"""])
        candidate_labels = inputs["""candidate_labels"""]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(""",""")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """simple docstring"""
        target_size = model_inputs.pop("""target_size""")
        candidate_label = model_inputs.pop("""candidate_label""")
        is_last = model_inputs.pop("""is_last""")
        outputs = self.model(**model_inputs)
        model_outputs = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output["""candidate_label"""]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["""target_size"""])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["""scores"""][index].item()
                box = self._get_bounding_box(outputs["""boxes"""][index][0])
                result = {"""score""": score, """label""": label, """box""": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
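# A minimal usage sketch (added; the checkpoint name is one example of a
# compatible model):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )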
| 100
| 0
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sorts a list of non-negative integers, one digit position per pass.

    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort([1, 100, 10, 1000]) == sorted([1, 100, 10, 1000])
    True
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
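    # Added example: each pass buckets by one digit, so after the final pass
    # the list is fully sorted:
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]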
| 96
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__lowerCamelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 96
| 1
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
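
# Cross-check against scikit-learn's reference implementation -- a sanity sketch using
# the variables defined above (n_neighbors=5 matches the default k in `classifier`).
if __name__ == "__main__":
    from sklearn.neighbors import KNeighborsClassifier

    knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])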
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
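
# Why these helpers exist: `transpose`, `reshape`, `squeeze` and `expand_dims` from
# transformers.utils dispatch on the input's framework, so one call works for NumPy,
# PyTorch, TensorFlow and JAX arrays alike. A NumPy-only sketch:
#
#     x = np.random.randn(2, 3)
#     transpose(x).shape            # (3, 2)
#     reshape(x, (3, 2)).shape      # (3, 2)
#     expand_dims(x, axis=1).shape  # (2, 1, 3)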
| 0
| 0
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
UpperCamelCase__ = "sshleifer/bart-tiny-random"
UpperCamelCase__ = "patrickvonplaten/t5-tiny-random"
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return AutoConfig.from_pretrained(UpperCamelCase_ )
def _lowerCamelCase ( self ):
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )
def _lowerCamelCase ( self ):
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCamelCase ( self ):
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCamelCase ( self ):
with self.assertRaises(UpperCamelCase_ ):
create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=UpperCamelCase_ , d=UpperCamelCase_ )
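
# Direct usage sketch of the helper under test, with the signature exercised above;
# it returns the student model plus bookkeeping about which teacher layers were copied:
#
#     student, copied_enc_layers, copied_dec_layers = create_student_by_copying_alternating_layers(
#         TINY_BART, tempfile.mkdtemp(), e=1, d=1
#     )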
| 619
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 189
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {'vocab_file': 'vocab.txt'}
lowerCAmelCase : Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
lowerCAmelCase : Dict = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
lowerCAmelCase : Tuple = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if its serialized state disagrees with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # [CLS] A [SEP] (+ B [SEP] for sequence pairs)
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
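
# The __init__ above re-syncs the backend normalizer with the requested options. A quick
# way to observe that state (assumes Hub access; checkpoint id is from the map above):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base", do_lower_case=False)
#     json.loads(tok.backend_tokenizer.normalizer.__getstate__())["lowercase"]  # False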
| 353
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs and store them in `igf_data_file`."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected IGF pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering training contexts with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
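
# The comment in `finetune` describes decaying the filter threshold from one standard
# deviation above the mean predicted IG to one below over the first batches; the code
# above uses a simpler hard switch at batch 10. A sketch of the decaying variant
# (illustrative only, not part of the original script):
#
#     def decayed_threshold(step: int, observed_qs, total: int = 10) -> float:
#         frac = min(step / total, 1.0)
#         return float(np.mean(observed_qs) + (1 - 2 * frac) * np.std(observed_qs))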
| 353
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """
    Generic feature extractor class, instantiated as one of the library's feature extractor classes via
    `AutoFeatureExtractor.from_pretrained(...)`. This class cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
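
# Resolution sketch: `from_pretrained` checks preprocessor_config.json, then the model
# config, then FEATURE_EXTRACTOR_MAPPING. Per the mapping above, a wav2vec2 checkpoint
# resolves to Wav2Vec2FeatureExtractor (checkpoint id is illustrative):
#
#     fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     type(fe).__name__  # "Wav2Vec2FeatureExtractor"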
| 412
|
def solution(limit=1_000_000):
    # Sieve of Eratosthenes over the odd numbers.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
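

# A brute-force cross-check of the totient product formula used above, for one value
# (illustrative helper, not part of the original solution):
def _phi_naive(n: int) -> int:
    from math import gcd

    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


assert _phi_naive(12) == 4  # 12 * (1 - 1/2) * (1 - 1/3) == 4
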
if __name__ == "__main__":
print(F'''{solution() = }''')
| 412
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
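
# A minimal sketch of the lazy-import idea used above (illustrative only; the real
# _LazyModule also handles extra_objects, import failures and module specs):
#
#     class _Lazy(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)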
| 326
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table


def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
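
# Typical invocations from the repository root (matching the header comment above):
#
#     python utils/check_table.py                      # verify; raises if the table is stale
#     python utils/check_table.py --fix_and_overwrite  # rewrite the table in docs/source/en/index.md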
| 326
| 1
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow console noise
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 385
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 385
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # A legacy "feature_extractor_type" key should resolve to the matching image processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
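
# The registration pattern the tests above exercise, in isolation (names come from the
# local test_module package imported at the top of this file):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     # ... use the auto-API ...
#     del CONFIG_MAPPING._extra_content["custom"]
#     del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]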
| 716
|
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
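

# Inverse mapping for a quick round-trip check (illustrative helper, not part of the
# original module):
def column_to_title(column_number: int) -> str:
    title = ""
    while column_number > 0:
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert column_to_title(excel_title_to_column("AZ")) == "AZ"
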
if __name__ == "__main__":
from doctest import testmod
testmod()
| 200
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : str ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : str , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Tuple , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : str ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
lowerCamelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : Dict , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowerCamelCase_ ( cls : int , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
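# The classes above are import-time placeholders: any attempt to instantiate
# or call them invokes requires_backends, which raises an ImportError naming
# the missing packages. A commented sketch (hypothetical class name, since the
# concrete dummy names are project specific):
#
#     try:
#         OnnxBackedPipeline()  # without torch/transformers/onnx installed
#     except ImportError as err:
#         print(err)  # lists the backends that must be installed first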
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__lowerCamelCase : List[str] = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
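# A minimal usage sketch (not part of the original module); it assumes network
# access to the Hugging Face Hub for the pretrained tokenizer files.
#
#     from transformers import AlbertTokenizerFast
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     encoded = tokenizer("A quick example.")
#     print(encoded["input_ids"])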
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **preprocess_params):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **preprocess_params)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
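# A minimal usage sketch (not part of the original module); assumes a default
# summarization checkpoint can be downloaded through the `pipeline` factory.
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization")
#     print(summarizer("A very long article ...", min_length=5, max_length=56))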
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
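# A minimal wiring sketch (not part of the original file); `model`,
# `training_args`, the datasets and `post_processing_function` are assumed to
# come from the surrounding example script (e.g. run_seq2seq_qa.py).
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)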
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so that the MST is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm: repeatedly add the cheapest
        edge leaving each component until a single component remains."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
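if __name__ == "__main__":
    # Small self-check (not part of the original file): on this 4-cycle the
    # minimum spanning tree keeps the three cheapest edges (total weight 6).
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
    mst = Graph.boruvka_mst(g)
    print(mst)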
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
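# A minimal usage sketch (not part of the original module); assumes a local
# SentencePiece model file is available at the given path.
#
#     tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#     ids = tokenizer.encode("Hello world")
#     print(tokenizer.convert_ids_to_tokens(ids))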
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
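# A short usage sketch (not part of the original file): the derived attributes
# follow directly from `depths` and `embed_dim`.
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#     print(config.num_layers)   # 4
#     print(config.hidden_size)  # 64 * 2 ** 3 == 512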
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; `key` is the default key used when none is given."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list, restricted to edge weights 0 and 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: a deque replaces the priority queue of Dijkstra's algorithm."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges are explored first (front of the deque),
                # 1-weight edges afterwards (back of the deque).
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
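if __name__ == "__main__":
    # A small usage sketch (not part of the original file): two 0-weight edges
    # and one 1-weight edge give a shortest 0-1 BFS distance of 1.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 0)
    g.add_edge(2, 3, 1)
    assert g.get_shortest_path(0, 3) == 1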
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 11...1 (k ones) is divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor for which least_divisible_repunit exceeds the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
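# Quick illustration (not part of the original file). With this definition the
# sign follows the parity of len(prime_factors(number)): 6 = 2 * 3 has an even
# count, 7 has an odd count. (Inputs with repeated prime factors are not given
# the classical Möbius value 0 by this simplified variant.)
assert mobius(6) == 1
assert mobius(7) == -1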
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
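# Illustration of the distribution rule exercised above (not part of the
# original test file): shards are split as evenly as possible, with the
# remainder going to the first jobs.
#
#     _distribute_shards(num_shards=10, max_num_jobs=3)
#     # -> [range(0, 4), range(4, 7), range(7, 10)]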
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args
        # Slice a batch out of the underlying Arrow table, honoring any indices mapping.
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to a binary file handle, batch by batch."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
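# Hedged usage sketch (not part of the module above): round-tripping a dataset
# through JSON Lines. File names below are illustrative.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b"]})
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=1000).write()
#     reloaded = JsonDatasetReader("out.jsonl").read()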
| 182
| 0
|
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale ("L" mode) PIL image around its mean pixel value."""
    width, height = image.size
    pixels = image.load()

    # First pass: accumulate the mean pixel intensity.
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    # Second pass: pixels above the mean become white, the rest black.
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
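# Hedged note: mean_threshold assumes a single-channel image, so each
# pixels[x, y] is a plain int; converting the input with .convert("L") first,
# as in the __main__ block above, keeps that assumption valid.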
| 56
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity between two collections.

    If alternative_union is True, divide by len(set_a) + len(set_b) instead of
    the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Preserve order and duplicates when building the union of sequences.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
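# Hedged follow-up: with alternative_union=True the denominator is
# len(set_a) + len(set_b) instead of the true union size, e.g.:
#
#     print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ~ 0.27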
| 526
| 0
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def __snake_case ( SCREAMING_SNAKE_CASE: Any , SCREAMING_SNAKE_CASE: List[Any]=1.0 , SCREAMING_SNAKE_CASE: Tuple=None , SCREAMING_SNAKE_CASE: Any=None ):
"""simple docstring"""
if rng is None:
_lowerCAmelCase = global_rng
_lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : Dict=400 , UpperCAmelCase_ : str=2_000 , UpperCAmelCase_ : int=2_048 , UpperCAmelCase_ : Tuple=128 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Dict=30 , UpperCAmelCase_ : List[Any]=44_100 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = min_seq_length
_lowerCAmelCase = max_seq_length
_lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase = spectrogram_length
_lowerCAmelCase = feature_size
_lowerCAmelCase = num_audio_channels
_lowerCAmelCase = hop_length
_lowerCAmelCase = chunk_length
_lowerCAmelCase = sampling_rate
def __lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : str=False ) -> str:
"""simple docstring"""
def _flatten(UpperCAmelCase_ : Optional[int] ):
return list(itertools.chain(*UpperCAmelCase_ ) )
if equal_length:
_lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int = TvltFeatureExtractor
def __lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = TvltFeatureExtractionTester(self )
def __lowerCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'feature_size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'hop_length' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'chunk_length' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'sampling_rate' ) )
def __lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0]
check_json_file_has_correct_format(UpperCAmelCase_ )
_lowerCAmelCase = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ )
_lowerCAmelCase = feat_extract_first.to_dict()
_lowerCAmelCase = feat_extract_second.to_dict()
_lowerCAmelCase = dict_first.pop('mel_filters' )
_lowerCAmelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(UpperCAmelCase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(UpperCAmelCase_ )
_lowerCAmelCase = self.feature_extraction_class.from_json_file(UpperCAmelCase_ )
_lowerCAmelCase = feat_extract_first.to_dict()
_lowerCAmelCase = feat_extract_second.to_dict()
_lowerCAmelCase = dict_first.pop('mel_filters' )
_lowerCAmelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_lowerCAmelCase = feature_extractor(
UpperCAmelCase_ , return_tensors='np' , sampling_rate=44_100 , mask_audio=UpperCAmelCase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase = np.asarray(UpperCAmelCase_ )
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowerCamelCase ( self : Any , UpperCAmelCase_ : str ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_lowerCAmelCase = self._load_datasamples(1 )
_lowerCAmelCase = TvltFeatureExtractor()
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_lowerCAmelCase = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCAmelCase_ , atol=1E-4 ) )
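# Hedged usage sketch (not part of the test file above): extracting audio
# features outside the test harness; the random waveform is illustrative.
#
#     import numpy as np
#     from transformers import TvltFeatureExtractor
#
#     extractor = TvltFeatureExtractor()
#     waveform = np.random.rand(44_100)
#     audio_values = extractor([waveform], return_tensors="np", sampling_rate=44_100).audio_values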
| 701
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase )
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE_: ClassVar[Features] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE_: ClassVar[Features] = Features({"labels": ClassLabel} )
SCREAMING_SNAKE_CASE_: str = "image"
SCREAMING_SNAKE_CASE_: str = "labels"
def __lowerCamelCase ( self : str , UpperCAmelCase_ : Optional[Any] ) -> Dict:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_lowerCAmelCase = copy.deepcopy(self )
_lowerCAmelCase = self.label_schema.copy()
_lowerCAmelCase = features[self.label_column]
_lowerCAmelCase = label_schema
return task_template
@property
def __lowerCamelCase ( self : Union[str, Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
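# Hedged usage sketch: aligning the template with a dataset's features
# (the ClassLabel names below are illustrative).
#
#     from datasets import ClassLabel, Features, Image
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)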
| 491
| 0
|