code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from math import ceil
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = list(range(0, lowerCAmelCase__ ) )
snake_case_ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
snake_case_ = []
for i in device_map_blocks:
if device_map_blocks.count(lowerCAmelCase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowerCAmelCase__ )
# Missing blocks
snake_case_ = [i for i in blocks if i not in device_map_blocks]
snake_case_ = [i for i in device_map_blocks if i not in blocks]
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(lowerCAmelCase__ ) )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
snake_case_ = list(range(lowerCAmelCase__ ) )
snake_case_ = int(ceil(n_layers / len(lowerCAmelCase__ ) ) )
snake_case_ = [layers[i : i + n_blocks] for i in range(0, lowerCAmelCase__, lowerCAmelCase__ )]
return dict(zip(lowerCAmelCase__, lowerCAmelCase__ ) )
| 640
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure: maps each submodule to the public names it provides.
# The obfuscated source rebound a single name `A_` three times, destroying the
# dict; the canonical lazy-module pattern mutates one `_import_structure` dict.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed models are only exposed when torch is installed.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax-backed models are only exposed when flax is installed.
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 0
|
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class lowercase( Pipeline ):
    """Pipeline that returns the model's raw hidden-state features for a text input.

    The obfuscated source gave all four hook methods one shared name; the
    ``Pipeline`` base class dispatches to ``_sanitize_parameters`` /
    ``preprocess`` / ``_forward`` / ``postprocess``, restored here.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs):
        """Tokenize the raw text into framework-native tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs )
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first output tensor, as a tensor or nested lists."""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Delegate to the base Pipeline call machinery."""
        return super().__call__(*args, **kwargs )
| 713
|
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int ) -> int:
    """Return the number of set bits (1s) in the binary representation of *snake_case__*.

    Uses Kernighan's trick ``n &= n - 1``, which clears the lowest set bit,
    so the loop runs once per set bit rather than once per bit position.

    Raises:
        ValueError: if the input is not a non-negative integer.
    """
    # The obfuscated source checked isinstance(x, x), which always fails for ints.
    if not isinstance(snake_case__ , int ) or snake_case__ < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    number = snake_case__
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 28
| 0
|
def lowerCamelCase__ ( _A ) -> bool:
    """Return True if *_A* is "bouncy": its digits are neither entirely
    non-decreasing nor entirely non-increasing (Project Euler 112).

    Raises:
        ValueError: if the input is not an integer.
    """
    if not isinstance(_A , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(_A )
    sorted_str_n = "".join(sorted(str_n ) )
    # Ascending digits equal the sorted string; descending equal its reverse.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _check_bouncy(num):
    """True when *num*'s digits are neither fully non-decreasing nor non-increasing."""
    s = str(num )
    ordered = "".join(sorted(s ) )
    return ordered != s and ordered[::-1] != s


def lowerCamelCase__ ( _A = 99 ) -> int:
    """Return the least number at which the proportion of bouncy numbers
    first reaches *_A* percent (Project Euler 112).

    Raises:
        ValueError: if *_A* is not strictly between 0 and 100.
    """
    if not 0 < _A < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    # The obfuscated source called an undefined `check_bouncy`; a private
    # helper is defined above so this function is self-contained.
    bouncy_num = 0
    num = 1

    while True:
        if _check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= _A:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The obfuscated source called an undefined name `solution`; the solver
    # defined in this file is `lowerCamelCase__`.
    print(f'''{lowerCamelCase__(99)}''')
| 376
|
"""simple docstring"""
from __future__ import annotations
from random import random
class A_:
    """Treap node: holds a value, a random heap priority, and two children.

    The random priority keeps the treap balanced in expectation
    (binary-search-tree on values, min-heap on priorities).
    """

    def __init__(self, value=None):
        self.value = value
        # Random priority drives the heap ordering used by merge().
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)}, indent=1 )

    def __str__(self):
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right


# The rest of this file refers to the node class as ``Node``.
Node = A_
def split(root, value):
    """Split treap *root* into two treaps ``(left, right)`` where every value
    in *left* is < *value* and every value in *right* is >= *value*.

    Note: the file-internal callers (insert/erase) use the name ``split``,
    which the obfuscated source did not define.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belong to the right half.
            left, root.left = split(root.left, value )
            return left, root
        else:
            # Root (and its left subtree) belong to the left half.
            root.right, right = split(root.right, value )
            return root, right
def merge(left, right):
    """Merge two treaps where every value in *left* is < every value in *right*.

    The node with the smaller priority becomes the root (min-heap invariant).
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right )
        return left
    else:
        right.left = merge(left, right.left )
        return right
def insert(root, value):
    """Insert *value* into treap *root*; return the new root.

    Splits around *value*, then merges left-part + new node + right-part.
    """
    node = Node(value )
    left, right = split(root, value )
    return merge(merge(left, node ), right )
def erase(root, value):
    """Remove every node holding *value* from treap *root*; return the new root.

    Splits out the slice [value, value] and merges the remainder back together.
    """
    left, right = split(root, value - 1 )
    _, right = split(right, value )
    return merge(left, right )
def inorder(root) -> None:
    """Print the treap's values in sorted (in-order) order, comma-separated,
    with no trailing newline."""
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value, end="," )
        inorder(root.right )
def interact_treap(root, args):
    """Apply whitespace-separated commands to the treap and return the new root.

    ``+N`` inserts N, ``-N`` erases all nodes with value N; anything else
    prints an error and leaves the treap unchanged.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root, int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main() -> None:
    """Interactive loop: read treap commands from stdin, print the treap
    after each line, until the user enters 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )

    args = input()
    while args != "q":
        root = interact_treap(root, args )
        print(root )
        args = input()

    print("good by!" )
if __name__ == "__main__":
    # Check embedded doctests, then start the interactive treap session.
    import doctest

    doctest.testmod()
    main()
| 46
| 0
|
def lowerCamelCase__ ( _lowerCamelCase = 50 ) ->int:
    """Project Euler 114: count the ways a row of *_lowerCamelCase* units can be
    filled with red blocks of length >= 3, any two blocks separated by at
    least one black square (the all-black row counts as one way).

    Dynamic programming: ways_number[r] accumulates, for each block length and
    start offset, the arrangements of the remainder of the row.
    """
    length = _lowerCamelCase
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # One extra arrangement: the block flush against the row's end.
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
    # The obfuscated source called an undefined `solution`; the solver defined
    # in this file is `lowerCamelCase__`.
    print(F"""{lowerCamelCase__() = }""")
| 592
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


# Emit a one-time deprecation warning that points callers at the new import path.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 592
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture model used to build the test tokenizer.
lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Test suite for SpeechTaTokenizer.

    NOTE(review): this block is machine-obfuscated. The placeholder ``__a`` is
    used both as the mixin base class (presumably ``TokenizerTesterMixin``,
    imported above — confirm) and as many unrelated argument placeholders, and
    several signatures repeat ``__a`` as a parameter name (a SyntaxError as
    written). Code is kept byte-identical; only comments/docstrings added.
    """

    # NOTE(review): the three `_A` class attributes shadow each other; upstream
    # these are presumably tokenizer_class / test flags — confirm.
    _A : Optional[Any] = SpeechTaTokenizer
    _A : Tuple = False
    _A : List[Any] = True

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
        """setUp: build a tokenizer from the SentencePiece fixture, register a
        <mask> special token and a <ctc_blank> token, and save it to tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __lowercase : Tuple = SpeechTaTokenizer(__a )

        __lowercase : Optional[int] = AddedToken("""<mask>""" , lstrip=__a , rstrip=__a )
        # NOTE(review): `mask_token` / `tokenizer` below are presumably the
        # de-obfuscated names of the `__lowercase` assignments above.
        __lowercase : Dict = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase ( self : str , __a : Optional[Any] ) -> Tuple:
        """Return an (input_text, output_text) pair for round-trip checks."""
        __lowercase : Optional[int] = """this is a test"""
        __lowercase : Tuple = """this is a test"""
        return input_text, output_text

    def lowerCAmelCase ( self : str , __a : Optional[Any] , __a : Union[str, Any]=False , __a : List[str]=20 , __a : Optional[int]=5 ) -> Any:
        """Encode a sample text then decode it back; returns (text, ids)."""
        __lowercase , __lowercase : Any = self.get_input_output_texts(__a )
        __lowercase : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
        __lowercase : int = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
        return text, ids

    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Check token<->id conversion for the <pad> token (id 1)."""
        __lowercase : List[str] = """<pad>"""
        __lowercase : Dict = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )

    def lowerCAmelCase ( self : Dict ) -> Dict:
        """Check the vocab's first/last entries and its total size (81)."""
        __lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-4] , """œ""" )
        self.assertEqual(vocab_keys[-2] , """<mask>""" )
        self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
        self.assertEqual(len(__a ) , 81 )

    def lowerCAmelCase ( self : Tuple ) -> str:
        """Base vocab size (without added tokens) should be 79."""
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )

    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Adding regular and special tokens must grow the vocab and encode to
        ids above the original vocab range."""
        __lowercase : Any = self.get_tokenizers(do_lower_case=__a )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                __lowercase : Union[str, Any] = tokenizer.vocab_size
                __lowercase : List[str] = len(__a )

                self.assertNotEqual(__a , 0 )

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                __lowercase : Tuple = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                __lowercase : List[str] = tokenizer.add_tokens(__a )
                __lowercase : Dict = tokenizer.vocab_size
                __lowercase : Union[str, Any] = len(__a )

                self.assertNotEqual(__a , 0 )
                self.assertEqual(__a , __a )
                self.assertEqual(__a , len(__a ) )
                self.assertEqual(__a , all_size + len(__a ) )

                __lowercase : List[str] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=__a )

                self.assertGreaterEqual(len(__a ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                __lowercase : str = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                __lowercase : Tuple = tokenizer.add_special_tokens(__a )
                __lowercase : str = tokenizer.vocab_size
                __lowercase : Union[str, Any] = len(__a )

                self.assertNotEqual(__a , 0 )
                self.assertEqual(__a , __a )
                self.assertEqual(__a , len(__a ) )
                self.assertEqual(__a , all_size_a + len(__a ) )

                __lowercase : Any = tokenizer.encode(
                    """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=__a )

                self.assertGreaterEqual(len(__a ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    def lowerCAmelCase ( self : int ) -> str:
        """Intentionally skipped (inherited test not applicable)."""
        pass

    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Intentionally skipped (inherited test not applicable)."""
        pass

    def lowerCAmelCase ( self : Dict ) -> List[Any]:
        """Tokenize known strings and verify the exact token/id sequences,
        including <unk> substitution for out-of-vocab pieces ("92000")."""
        __lowercase : Dict = self.get_tokenizer()

        __lowercase : str = tokenizer.tokenize("""This is a test""" )
        # fmt: off
        self.assertListEqual(__a , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )

        __lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __a , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )

        __lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
        # fmt: off
        self.assertListEqual(__a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on

        __lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )

    @slow
    def lowerCAmelCase ( self : Tuple ) -> List[str]:
        """Integration test: encode three long sequences against a pinned
        revision of microsoft/speecht5_asr and compare the full encoding."""
        __lowercase : Union[str, Any] = [
            """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
            """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
            """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
            """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
            """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
            """conditioning on both left and right context in all layers.""",
            """The quick brown fox jumps over the lazy dog.""",
        ]

        # fmt: off
        __lowercase : Optional[Any] = {
            """input_ids""": [
                [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
                [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            """attention_mask""": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=__a , )
| 149
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# The obfuscated source assigned every value to a fresh `lowerCamelCase`-style
# placeholder while later lines read the real names (vocab, build_dir, ...);
# the coherent names are restored throughout.
mname_tiny = '''tiny-wmt19-en-ru'''

# Build

# borrowed from a test
vocab = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    # The tokenizer must be built while the temp files still exist.
    tokenizer = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=10_00,
    tgt_vocab_size=10_00,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')

# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)

print('''test output:''', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f'''Generated {mname_tiny}''')

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 149
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)

# The obfuscated source rebound the logger's name to this dict, clobbering it;
# the archive map gets its own conventional name here.
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase_( PretrainedConfig ):
    """Configuration class storing the hyper-parameters of a ViT-MAE model.

    The obfuscated source repeated ``__UpperCAmelCase`` for every parameter
    (a SyntaxError); parameter names are restored from the attribute
    assignments in the body. ``model_type`` is the key ``PretrainedConfig``
    uses for auto-class lookup.
    """

    model_type = '''vit_mae'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.7_5,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )

        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyper-parameters (MAE reconstructs masked patches).
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 160
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_( metaclass=DummyObject ):
    """Torch-dependency placeholder: raises a helpful error when torch is missing.

    NOTE(review): all seven dummy classes in this file share one obfuscated
    name; the originals are distinct model classes (unrecoverable from here).
    """

    _backends = ['''torch''']

    def __init__( self, *args, **kwargs ) -> None:
        requires_backends(self, ["""torch"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )
class lowerCAmelCase_( metaclass=DummyObject ):
    """Torch-dependency placeholder: raises a helpful error when torch is missing."""

    _backends = ['''torch''']

    def __init__( self, *args, **kwargs ) -> None:
        requires_backends(self, ["""torch"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )
class lowerCAmelCase_( metaclass=DummyObject ):
    """Torch-dependency placeholder: raises a helpful error when torch is missing."""

    _backends = ['''torch''']

    def __init__( self, *args, **kwargs ) -> None:
        requires_backends(self, ["""torch"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )
class lowerCAmelCase_( metaclass=DummyObject ):
    """Torch-dependency placeholder: raises a helpful error when torch is missing."""

    _backends = ['''torch''']

    def __init__( self, *args, **kwargs ) -> None:
        requires_backends(self, ["""torch"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
def _SCREAMING_SNAKE_CASE(*args, **kwargs):
    """Stub function raising an informative error via `requires_backends`.

    NOTE(review): the original chunk defined this identical function seven
    times in a row under the same name; only the final binding survives, so
    the duplicates are collapsed into a single definition (behavior is
    unchanged — each redefinition bound the same name to an identical body).
    The positional-args tuple is forwarded as the first argument, exactly as
    the original did.
    """
    requires_backends(args, ["torch"])
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : int = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : str = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Any = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls ,["""torch"""] )
class lowerCAmelCase_( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = ['''torch''']
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
requires_backends(self ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
requires_backends(cls ,["""torch"""] )
@classmethod
def UpperCAmelCase_ ( cls ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[Any]:
requires_backends(cls ,["""torch"""] )
| 160
| 1
|
def __lowercase(number):
    """Return True if *number* is automorphic, i.e. its square ends in its own digits.

    Examples: 5 -> 25, 6 -> 36, 76 -> 5776 are automorphic; 7 -> 49 is not.

    :param number: the integer to test (negative numbers return False)
    :raises TypeError: if *number* is not an ``int``
    >>> __lowercase(76)
    True
    >>> __lowercase(7)
    False
    """
    # BUG FIX: the original called isinstance(number, number) — passing the
    # value itself as the type argument — which raises TypeError for every
    # input; and it raised TypeError(number) instead of the built message.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the square against the digits of the
    # number itself, peeling one digit per iteration.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 0
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
# NOTE(review): the signature declares `__lowerCAmelCase` six times — duplicate
# parameter names are a SyntaxError in Python, so this method cannot run as
# written; the names were presumably distinct flags (distributed, extra_args_str,
# predict_with_generate, do_train, do_eval, do_predict) before mechanical
# renaming collapsed them. Also note every test method in this class is named
# `A`, so each definition shadows the previous one.
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""Run one quick seq2seq training epoch via self.run_trainer and sanity-check eval metrics."""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
# Load the trainer state written to the output dir and extract the log history.
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
# NOTE(review): `do_eval` / `logs` / `eval_metrics` / `first_step_stats` /
# `last_step_stats` below are never bound under those names in this obfuscated
# copy (all assignments target `__magic_name__`) — verify against the original.
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""Smoke test: quick seq2seq run on a single (or no) GPU with default options."""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""Quick seq2seq run under multi-GPU."""
# NOTE(review): `__lowerCAmelCase` is undefined in this scope (name-mangled,
# and no such attribute exists) — presumably this was `distributed=True`/`False`
# before renaming; confirm against the original test file.
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""Quick seq2seq run under multi-GPU (second variant)."""
# NOTE(review): undefined mangled name — was presumably a True/False literal.
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""Quick run with fairscale sharded DDP (simple mode); currently skipped."""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""Quick run with fairscale sharded DDP (simple mode) + fp16; currently skipped."""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""Quick run with fairscale ZeRO DP stage 2; currently skipped."""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""Quick run with fairscale ZeRO DP stage 2 + fp16; currently skipped."""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""Quick run with apex fp16 backend, executed twice to catch state corruption."""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
# NOTE(review): `__lowerCAmelCase` below is an undefined mangled name —
# presumably `distributed=True` before renaming; confirm with the original.
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""Verify --log_level / --log_level_replica control how often the 'Running training' info line is emitted."""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
# NOTE(review): `experiments` / `experiment_id` / `data` below are never bound
# under those names here (assignments all target `__magic_name__`) — damage
# from mechanical renaming; verify against the original test file.
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
# Capture stderr during the quick run and count occurrences of the marker line.
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""Full (slow) training run: check that eval_loss improves and that do_predict writes its artifacts."""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
# NOTE(review): `logs` / `eval_metrics` / `first_step_stats` / `last_step_stats`
# / `contents` are never bound under those names in this obfuscated copy —
# all assignments target `__magic_name__`; verify against the original.
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def test_run_seq2seq_bnb(self):
    """Compare GPU memory usage and loss between plain AdamW and bitsandbytes 8-bit AdamW."""
    from transformers.training_args import OptimizerNames

    def train_and_return_metrics(optim):
        # One short training run; memory metrics must not be skipped so deltas get logged.
        extra_args = "--skip_memory_metrics 0"

        output_dir = self.run_trainer(
            max_len=128,
            model_name=MARIAN_MODEL,  # NOTE(review): module-level model constant — confirm its name at file top
            learning_rate=3e-4,
            num_train_epochs=1,
            optim=optim,
            distributed=True,  # force a fresh process so CUDA memory counters start clean
            extra_args_str=extra_args,
            do_eval=False,
            do_predict=False,
            n_gpus_to_use=1,
        )

        # Check metrics
        logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
        gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
        gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
        loss = logs[0]["train_loss"]
        return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

    gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
    gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

    gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

    gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
    gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
    gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

    # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
    # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
    # in 2 bytes and the diff in optim memory usage is derived as so:
    #
    # - normal 25*8=~200MB (8 bytes per param)
    # - bnb    25*2= ~50MB (2 bytes per param)
    #
    # Thus we should expect ~150MB total memory saved.
    #
    # Peak memory should be the same - the total should be different by about that same margin
    #
    # After leaving a small margin to accommodate for differences between gpus let's check
    # that we have at least 120MB in savings
    expected_savings = 120

    # uncomment the following if this test starts failing - requires py38 for a new print feature
    # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
    # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
    # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
    # print(f"{gpu_alloc_mem_diff=}MB")
    # print(f"{gpu_peak_mem_diff=}MB")
    # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
    # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

    self.assertGreater(
        gpu_alloc_mem_diff,
        expected_savings,
        "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
        f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
        f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
    )
    self.assertGreater(
        gpu_total_mem_diff,
        expected_savings,
        "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
        f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
        f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
    )
    self.assertEqual(
        loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
    )
def run_trainer(
    self,
    max_len,
    model_name,
    num_train_epochs,
    learning_rate=3e-3,
    optim="adafactor",
    distributed=False,
    extra_args_str=None,
    eval_steps=0,
    predict_with_generate=True,
    do_train=True,
    do_eval=True,
    do_predict=True,
    n_gpus_to_use=None,
):
    """Build the run_translation.py CLI and execute it (in-process or via torch.distributed.run).

    Returns the output directory so callers can inspect trainer_state.json and artifacts.
    NOTE(review): this method was defined with an obfuscated name but is called as
    ``self.run_trainer`` throughout the class — the name restored here is the one callers use.
    """
    data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
    output_dir = self.get_auto_remove_tmp_dir()
    args_train = f"""
        --model_name_or_path {model_name}
        --train_file {data_dir}/train.json
        --validation_file {data_dir}/val.json
        --test_file {data_dir}/test.json
        --output_dir {output_dir}
        --overwrite_output_dir
        --max_train_samples 8
        --max_source_length {max_len}
        --max_target_length {max_len}
        --do_train
        --num_train_epochs {str(num_train_epochs)}
        --per_device_train_batch_size 4
        --learning_rate {learning_rate}
        --warmup_steps 8
        --logging_steps 0
        --logging_strategy no
        --save_steps {str(eval_steps)}
        --group_by_length
        --label_smoothing_factor 0.1
        --target_lang ro_RO
        --source_lang en_XX
    """.split()

    args_eval = f"""
        --do_eval
        --per_device_eval_batch_size 4
        --max_eval_samples 8
        --val_max_target_length {max_len}
        --evaluation_strategy steps
        --eval_steps {str(eval_steps)}
    """.split()

    args_predict = """
        --do_predict
    """.split()

    args = []
    if do_train:
        args += args_train
    if do_eval:
        args += args_eval
    if do_predict:
        args += args_predict
    if predict_with_generate:
        args += "--predict_with_generate".split()

    if do_train:
        if optim == "adafactor":
            args += "--adafactor".split()
        else:
            args += f"--optim {optim}".split()

    if extra_args_str is not None:
        args += extra_args_str.split()

    if distributed:
        if n_gpus_to_use is None:
            n_gpus_to_use = get_gpu_count()
        master_port = get_torch_dist_unique_port()
        distributed_args = f"""
            -m torch.distributed.run
            --nproc_per_node={n_gpus_to_use}
            --master_port={master_port}
            {self.examples_dir_str}/pytorch/translation/run_translation.py
        """.split()
        cmd = [sys.executable] + distributed_args + args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
    else:
        testargs = ["run_translation.py"] + args
        with patch.object(sys, "argv", testargs):
            main()

    return output_dir
| 0
| 1
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Layer-norm and bias tensors are replicated across TP ranks; when merging shards
# they are averaged (summed, then divided by pretraining_tp).
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Weights stored RowParallel in Megatron-DeepSpeed are concatenated along dim 1;
# every other TP-split weight is ColumnParallel and is concatenated along dim 0.
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert a Megatron-DeepSpeed parameter name to the transformers BLOOM name.

    `key` is the parameter name inside one shard file, `file` is the shard's
    filename (it encodes the pipeline layer index as ``layer_NN``).
    """
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: shard layer index 3 corresponds to block 0
    # (layers 0-2 hold embeddings / norms), hence the -3 offset.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the storage size in bytes of one element of `dtype`.

    torch.bool is special-cased as 1/8 byte; otherwise the bit width is parsed
    from the dtype's string form (e.g. ``torch.float32`` -> 32 bits -> 4 bytes).

    Raises:
        ValueError: if no trailing bit-width digits can be found in the dtype name.
    """
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Merge Megatron-DeepSpeed BLOOM TP shards and save a transformers checkpoint.

    When `shard_model` is True each pipeline-layer file becomes one output shard
    plus an index json; otherwise everything is loaded into a single BloomModel
    and saved as one weights file.
    NOTE(review): restored from obfuscation damage — the function is invoked by
    name from the ``__main__`` block below, so this name must not change.
    """
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        # only the rank-0 file per layer; the other TP ranks are derived below
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI wrapper: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 326
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyper-parameters and helpers shared by the Yolos image-processing tests.

    NOTE(review): this class is instantiated as ``YolosImageProcessingTester(self)``
    in the test class below, so this name is the one callers rely on.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict to construct a YolosImageProcessor matching this tester."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize/pad to."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # shortest edge is scaled to size["shortest_edge"], aspect ratio preserved
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # a batch is padded up to the max height/width present in it
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for YolosImageProcessor: resize/normalize/pad behaviour and COCO annotation encoding.

    NOTE(review): method names restored from obfuscation damage — previously every
    method shared one name, so only the last definition survived.
    """

    # class attribute consumed by ImageProcessingSavingTestMixin
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 326
| 1
|
import requests
from bsa import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current share price text for `symbol` from Yahoo Finance India.

    NOTE(review): the file imports BeautifulSoup from ``bsa``, which is not a real
    package — the intended dependency is ``bs4`` (beautifulsoup4); fix the import.
    The function name is restored to ``stock_price`` because the ``__main__``
    block below calls it by that name.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # CSS class of the <div> wrapping the live-price <span> on the quote page;
    # brittle by nature — verify if Yahoo changes its markup.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    # Smoke run: print the live price for a handful of large-cap tickers.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 403
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with weight 0 or 1 (for 0-1 BFS)."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph stored as adjacency lists, supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        # Iterate over the outgoing edges of `vertex`.
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; the weight must be 0 or 1."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: deque-based Dijkstra variant running in O(V + E).

        Raises ValueError when `finish_vertex` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges stay in the current BFS layer (front of deque),
                # weight-1 edges go to the next layer (back of deque)
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 22
| 0
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximum value achievable with capacity `w` over `n` items.

    `vl` and `wt` are parallel sequences of item values and weights.  Items are
    taken greedily by value density (value/weight); the item that straddles the
    capacity boundary is taken fractionally.  Returns 0 when not even the first
    item's full weight fits under the greedy ordering.
    """
    # Sort item (value, weight) pairs by density, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of sorted weights
    k = bisect(acc, w)  # number of whole items that fit entirely
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 714
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq wav2vec2 parameter-name prefix -> transformers parameter name.
# Entries containing "*" are per-encoder-layer; the layer index is substituted in.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model rather than under the wav2vec2 encoder.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` into the attribute path `key` of `hf_pointer`.

    `weight_type` selects which tensor of the resolved module is overwritten
    ("weight", "weight_g", "weight_v", "bias", or None for the object itself);
    `full_name` is the original fairseq name, used only for error/log messages.
    """
    # Walk down the dotted attribute path to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def _SCREAMING_SNAKE_CASE(fairseq_model, hf_model):
    """Copy every tensor from a fairseq wav2vec2 encoder into ``hf_model``.

    Returns the encoder->decoder projection module if the checkpoint has one
    (state-dict name starting with "proj"), else None.

    NOTE(review): duplicate parameter names and lost assignment targets in the
    obfuscated original are restored from the upstream conversion script. The
    helpers ``load_conv_layer`` / ``set_recursively`` and the ``MAPPING`` table
    are referenced by their upstream names, which exist in this file only in
    obfuscated form — TODO confirm call targets.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def _SCREAMING_SNAKE_CASE(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq ``conv_layers.*`` tensor into the HF feature extractor.

    type_id 0 is the convolution itself; type_id 2 is its layer norm (only
    present on layer 0 for group-norm models). Anything else is recorded in
    ``unused_weights``.

    NOTE(review): duplicate parameter names and lost assignment targets in the
    obfuscated original are restored from the upstream conversion script; the
    broken ``feature_extractor[layer_id]`` in two assert messages is fixed to
    ``feature_extractor.conv_layers[layer_id]``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[Any] ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase : str = emb.weight.shape
__lowerCAmelCase : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any ) -> Dict:
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : List[Any] = f.readlines()
__lowerCAmelCase : Any = [line.split(""" """ )[0] for line in lines]
__lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 + transformer-decoder checkpoint into a HF
    SpeechEncoderDecoderModel and save it to ``pytorch_dump_folder_path``.

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError and most assignment targets were lost; both are restored from
    the upstream HF conversion script. The helpers it calls
    (``recursively_load_weights_wavaveca``, ``create_vocab_dict``) exist in
    this file only under obfuscated names — TODO confirm call targets.
    """
    config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    # presumably these select the tokenizer / feature-extractor classes on
    # reload — restored from the upstream script, TODO confirm key names
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the wav2vec2 -> SpeechEncoderDecoderModel conversion.
    # NOTE(review): the obfuscated original bound the parser to one name but
    # used `parser`/`args`, and called the nonexistent name
    # `convert_wavaveca_checkpoint`; the conversion function is the last
    # definition bound to the obfuscated name `_SCREAMING_SNAKE_CASE` above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    _SCREAMING_SNAKE_CASE(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 240
| 0
|
'''simple docstring'''
def UpperCAmelCase(arr):
    """Count inversions by brute force in O(n^2).

    An inversion is a pair (i, j) with i < j and arr[i] > arr[j].

    NOTE(review): the obfuscated original assigned `n` and `num_inversions`
    to throwaway locals; restored so the body's references resolve.
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def UpperCAmelCase(arr):
    """Count inversions via merge sort in O(n log n).

    Returns ``(sorted_copy, inversion_count)``.

    NOTE(review): the obfuscated original recursed through names that do not
    exist in this file (`count_inversions_recursive`, `_count_cross_inversions`);
    the recursion and merge step are made self-contained here.
    """

    def _merge_count(p, q):
        # Merge two sorted lists, counting cross inversions p[i] > q[j].
        r = []
        i = j = inv = 0
        while i < len(p) and j < len(q):
            if p[i] > q[j]:
                # p is sorted, so every remaining p[k] also exceeds q[j].
                inv += len(p) - i
                r.append(q[j])
                j += 1
            else:
                r.append(p[i])
                i += 1
        if i < len(p):
            r.extend(p[i:])
        else:
            r.extend(q[j:])
        return r, inv

    def _sort_count(a):
        # Divide, conquer, and sum the left/right/cross inversion counts.
        if len(a) <= 1:
            return list(a), 0
        mid = len(a) // 2
        left, inv_left = _sort_count(a[:mid])
        right, inv_right = _sort_count(a[mid:])
        merged, inv_cross = _merge_count(left, right)
        return merged, inv_left + inv_right + inv_cross

    return _sort_count(arr)
def UpperCAmelCase(p, q):
    """Merge two sorted lists, counting cross inversions.

    Returns ``(merged, num_inversion)`` where ``num_inversion`` is the number
    of pairs with p[i] > q[j].

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError, and the `r`/`i`/`j`/`num_inversion` initializers were lost;
    restored to match the body's references.
    """
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def UpperCAmelCase():
    """Smoke-test two inversion-counting strategies on sample arrays.

    NOTE(review): the obfuscated original called `count_inversions_bf` /
    `count_inversions_recursive` and referenced `arr_a`, none of which exist
    in this file; self-contained helpers are used instead.
    """

    def _brute_force(arr):
        # O(n^2) pairwise comparison.
        n = len(arr)
        return sum(1 for i in range(n - 1) for j in range(i + 1, n) if arr[i] > arr[j])

    def _merge_count(p, q):
        # Merge sorted p and q, counting cross inversions.
        r = []
        i = j = inv = 0
        while i < len(p) and j < len(q):
            if p[i] > q[j]:
                inv += len(p) - i
                r.append(q[j])
                j += 1
            else:
                r.append(p[i])
                i += 1
        if i < len(p):
            r.extend(p[i:])
        else:
            r.extend(q[j:])
        return r, inv

    def _sort_count(arr):
        # Merge-sort based O(n log n) count.
        if len(arr) <= 1:
            return list(arr), 0
        mid = len(arr) // 2
        left, inv_l = _sort_count(arr[:mid])
        right, inv_r = _sort_count(arr[mid:])
        merged, inv_c = _merge_count(left, right)
        return merged, inv_l + inv_r + inv_c

    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = _brute_force(arr_1)
    _, num_inversions_recursive = _sort_count(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = _brute_force(arr_1)
    _, num_inversions_recursive = _sort_count(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = _brute_force(arr_1)
    _, num_inversions_recursive = _sort_count(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    # NOTE(review): the original called the nonexistent `main()`; this file's
    # demo driver is the function defined directly above.
    UpperCAmelCase()
| 320
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A = parser.parse_args()
if args.model_type == "bert":
A = BertForMaskedLM.from_pretrained(args.model_name)
A = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
A = model.state_dict()
A = {}
for w in ["word_embeddings", "position_embeddings"]:
A = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
A = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
A = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
A = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
A = state_dict['cls.predictions.decoder.weight']
A = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
A = state_dict[f"""cls.predictions.transform.dense.{w}"""]
A = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 320
| 1
|
'''simple docstring'''
from __future__ import annotations
def A_ ( snake_case , snake_case ):
SCREAMING_SNAKE_CASE:List[str] = set(snake_case ), [start]
while stack:
SCREAMING_SNAKE_CASE:Any = stack.pop()
explored.add(snake_case )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(snake_case )
return explored
A_ = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 711
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic kernels so the pixel-level assertions below reproduce.
enable_full_determinism()
class _snake_case(_a, unittest.TestCase):
    """Fast tests for the Kandinsky 2.2 inpaint pipeline on tiny dummy models.

    NOTE(review): the base-class name `_a` and every member name were
    obfuscated to shared identifiers, so the many internal ``self.<name>``
    references could not resolve; member names are restored from the upstream
    diffusers test file — TODO confirm against upstream.
    """

    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    # presumably the xformers attention pass toggle — TODO confirm attr name
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # Base channel width of the dummy UNet (doubled for the second block).
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        return {"unet": unet, "scheduler": scheduler, "movq": movq}

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uinta / np.floataa in the obfuscated source are not real numpy
        # attributes; restored to uint8 / float32.
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # The obfuscated source lost the target of the `= 0` line; upstream
        # zeroes the top-left quadrant — TODO confirm the masked region.
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }

    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    """Slow GPU integration test for the full Kandinsky 2.2 inpaint pipeline.

    NOTE(review): member names were obfuscated; restored from the upstream
    diffusers test so unittest discovers tearDown and the test again. The
    invalid ``torch.floataa`` / ``np.floataa`` attributes are restored to
    float16 / float32.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # The obfuscated source lost the target of the `= 0` line; upstream
        # cuts the hat region out of the mask — TODO confirm the indices.
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 465
| 0
|
from math import pi
def UpperCamelCase_(angle, radius):
    """Return the length of a circular arc of ``angle`` degrees on a circle of
    the given ``radius``.

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError; names restored to match the body's references.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    # NOTE(review): the original printed `arc_length(90, 10)`, a name that does
    # not exist in this file; the function is called by its defined name.
    print(UpperCamelCase_(90, 10))
| 37
|
"""simple docstring"""
import os
import sys
# Prepend the repo's local `src/` directory so `transformers` resolves to the
# in-tree package rather than an installed one.
# NOTE(review): the obfuscated original assigned the path to `a_` but appended
# the undefined name `SRC_DIR` (a NameError); the same name is now used twice.
a_ = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(a_)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Runtime dependency names checked/used by the lightweight shim below.
# NOTE(review): this rebinds `a_`, clobbering the src-path value assigned
# above — an artifact of the obfuscated naming.
a_ = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
# Thin wrappers re-exporting the Auto* factory methods with their upstream
# docstrings attached.
# NOTE(review): each wrapper's `*x, **x` duplicate argument names were a
# SyntaxError and have been fixed; all wrappers still share one obfuscated
# name, so later definitions shadow earlier ones — an artifact of this file.
@add_start_docstrings(AutoConfig.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def __UpperCAmelCase(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 76
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class __snake_case :
"""simple docstring"""
def __init__( self : str ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[str] = value
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Optional[int] = None
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[str] = tree
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> str:
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : List[Any] ) -> Tuple:
'''simple docstring'''
yield self.depth_first_search(self.tree )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 717
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase(string1, string2):
    """Compare two equal-length bit strings for Quine-McCluskey merging.

    Returns the combined string with '_' at the single differing position, or
    False when the strings differ in more than one position.

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError and it compared a list against itself; restored from the
    structure of the remaining body.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def UpperCamelCase(binary):
    """Reduce a list of binary/minterm strings to prime implicants.

    Strings never marked '*' in a pass are collected as prime implicants; the
    loop continues on the de-duplicated `temp` markers until a pass produces
    nothing new.

    NOTE(review): the sibling string-compare helper exists in this file only
    under an obfuscated name, so an equivalent private helper is nested here;
    the control flow and literals (`is False`, "X") are kept exactly as in the
    original body.
    """

    def _compare(string1, string2):
        # Combined string with '_' at the single differing bit, or False.
        chars = list(string1)
        other = list(string2)
        diff = 0
        for idx in range(len(chars)):
            if chars[idx] != other[idx]:
                diff += 1
                chars[idx] = "_"
        if diff > 1:
            return False
        return "".join(chars)

    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = _compare(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def UpperCamelCase(no_of_variable, minterms):
    """Render each minterm as a fixed-width binary string.

    Bits are produced least-significant first and prepended, so the result is
    MSB-left with exactly ``no_of_variable`` characters.

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError and the `temp`/`string` targets were lost; restored.
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def UpperCamelCase(string1, string2, count):
    """Return True when the two strings differ in exactly ``count`` positions.

    Used to decide whether a prime implicant (with ``count`` '_' wildcards)
    covers a given binary term.

    NOTE(review): duplicate parameter names in the obfuscated original were a
    SyntaxError and it compared a list against itself; restored.
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def UpperCamelCase(chart, prime_implicants):
    """Select a cover of prime implicants from the coverage ``chart``.

    First takes every essential prime implicant (the sole 1 in some column)
    and zeroes the columns it covers; then greedily takes the row covering the
    most remaining columns until none remain.  ``chart`` is mutated in place.

    NOTE(review): duplicate parameter names and lost assignment targets in the
    obfuscated original are restored from the surviving loop structure.
    """
    temp = []
    select = [0] * len(chart)
    # Mark essential prime implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essentials and zero every column they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover whatever columns remain.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def UpperCamelCase(prime_implicants, binary):
    """Build the coverage chart: ``chart[i][j] == 1`` iff implicant ``i``
    covers term ``binary[j]``.

    An implicant with k '_' wildcards covers a term exactly when the two
    strings differ in k positions.

    NOTE(review): the sibling distance-check helper exists in this file only
    under an obfuscated name, so an equivalent private helper is nested here.
    """

    def _covers(implicant, term, dash_count):
        # True when implicant and term differ in exactly `dash_count` slots.
        diff = 0
        for a, b in zip(implicant, term):
            if a != b:
                diff += 1
        return diff == dash_count

    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if _covers(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def UpperCamelCase():
    """Interactive driver: read the variable count and minterms, then print
    the prime and essential prime implicants.

    NOTE(review): this calls `decimal_to_binary`, `check`,
    `prime_implicant_chart` and `selection`, which exist in this obfuscated
    file only under a single shared name; the upstream call names are kept —
    TODO confirm call targets once the siblings are renamed.
    """
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called the nonexistent `main()`; the driver
    # above is the last definition bound to the obfuscated name.
    UpperCamelCase()
| 683
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
# Local file names under which the tokenizer saves its vocabulary and merges.
# NOTE(review): obfuscation gave all four module-level constants below the
# same name, so each assignment clobbers the previous one; upstream these are
# distinct (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) — TODO confirm intended names.
snake_case : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# Hub URLs of the vocab/merges files for each pretrained Longformer checkpoint.
snake_case : Dict = {
    'vocab_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
    },
}
# Maximum positional-embedding length per checkpoint.
snake_case : List[Any] = {
    'allenai/longformer-base-4096': 40_96,
    'allenai/longformer-large-4096': 40_96,
    'allenai/longformer-large-4096-finetuned-triviaqa': 40_96,
    'allenai/longformer-base-4096-extra.pos.embd.only': 40_96,
    'allenai/longformer-large-4096-extra.pos.embd.only': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def SCREAMING_SNAKE_CASE():
    """Map every byte (0..255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes map to code points
    >= 256, so a byte-level BPE never has to handle whitespace/control chars.

    NOTE(review): the obfuscated original lost the ``bs``/``cs``/``n``
    assignment targets; restored from the canonical GPT-2/RoBERTa helper.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
    """Return the set of adjacent symbol pairs in *UpperCAmelCase__* (a word
    represented as a sequence of symbols, e.g. a tuple of strings).

    Assumes a non-empty word, as in the GPT-2 BPE reference implementation.
    """
    pairs = set()
    prev_char = UpperCAmelCase__[0]
    for char in UpperCAmelCase__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __lowercase ( UpperCamelCase ):
    """Byte-level Byte-Pair-Encoding tokenizer (GPT-2 style) for Longformer.

    Text is pre-split with ``self.pat``, each piece is byte-encoded through
    ``byte_encoder`` and then greedily merged using the ranked merge table
    loaded from ``merges_file`` — so no ``<unk>`` is produced for ordinary text.
    """

    # Hook attributes consumed by the base tokenizer class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load ``vocab_file`` (JSON) and ``merges_file`` and register special tokens.

        ``errors`` controls byte-decoding failures; ``add_prefix_space`` makes the
        first word tokenize like any non-initial word.
        """
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8' ) as merges_handle:
            # First line of a merges file is a "#version: ..." header.
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges ) ) ) )
        self.cache = {}  # memoizes bpe() results per word
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )

    @property
    def vocab_size(self ):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder )

    def get_vocab(self ):
        """Return the complete token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder )

    def bpe(self, token ):
        """Greedily merge the lowest-ranked adjacent pair of *token* until no
        known merge remains; returns the space-joined sub-units (cached)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word

    def _tokenize(self, text ):
        """Pre-split *text* with the tokenizer regex, then BPE-encode each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens

    def _convert_token_to_id(self, token ):
        """Token string -> vocabulary id (unk id when unknown)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index ):
        """Vocabulary id -> token string."""
        return self.decoder.get(index )

    def convert_tokens_to_string(self, tokens ):
        """Concatenate tokens and undo the byte -> unicode mapping back to text."""
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8', errors=self.errors )
        return text

    def save_vocabulary(self, save_directory, filename_prefix = None ):
        """Write ``vocab.json`` and ``merges.txt`` into *save_directory*.

        Returns the two file paths, or None after logging when the directory
        does not exist.
        """
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file, 'w', encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file, 'w', encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            # Sort merges by their rank so the file round-trips deterministically.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair = None ):
        """Wrap one sequence as ``<s> A </s>`` or a pair as ``<s> A </s></s> B </s>``."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_pair = None, already_has_special_tokens = False ):
        """Return 1 for positions holding special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_a_pair )) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair = None ):
        """All-zero segment ids — this model does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep ) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs ):
        """Optionally prepend a space so the first word tokenizes like any other."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 605
|
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
    """Sort *UpperCAmelCase__* (a list) with a stable top-down merge sort.

    Returns a sorted list; inputs of length <= 1 are returned as-is.
    """
    def merge(left, right ) -> list:
        # Merge two already-sorted lists by repeatedly popping the smaller head.
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(UpperCAmelCase__ ) <= 1:
        return UpperCAmelCase__
    mid = len(UpperCAmelCase__ ) // 2
    return merge(SCREAMING_SNAKE_CASE(UpperCAmelCase__[:mid] ), SCREAMING_SNAKE_CASE(UpperCAmelCase__[mid:] ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of integers from stdin and print them sorted.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*SCREAMING_SNAKE_CASE(unsorted), sep=',')
| 605
| 1
|
# Imports
import numpy as np
class UpperCAmelCase_ :
    """Compute vegetation indices from per-band reflectance matrices.

    Bands (numpy arrays, any matching shape): ``red``, ``green``, ``blue``,
    ``red_edge`` (stored as ``redEdge``) and ``nir``. Provide any subset at
    construction, then either call :meth:`calculation` with an index name or
    call the individual index methods directly.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None ) -> None:
        # Bands left as None are simply not stored; each index method reads
        # only the bands it needs.
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir )

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None ):
        """Update any subset of the stored band matrices; returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None ):
        """Dispatch an index computation by name (e.g. ``"NDVI"``).

        Any bands passed here update the stored matrices first. Unknown index
        names print a message and return False.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir )
        funcs = {
            '''ARVI2''': self.arvaa,
            '''CCCI''': self.ccci,
            '''CVI''': self.cvi,
            '''GLI''': self.gli,
            '''NDVI''': self.ndvi,
            '''BNDVI''': self.bndvi,
            '''redEdgeNDVI''': self.red_edge_ndvi,
            '''GNDVI''': self.gndvi,
            '''GBNDVI''': self.gbndvi,
            '''GRNDVI''': self.grndvi,
            '''RBNDVI''': self.rbndvi,
            '''PNDVI''': self.pndvi,
            '''ATSAVI''': self.atsavi,
            '''BWDRVI''': self.bwdrvi,
            '''CIgreen''': self.ci_green,
            '''CIrededge''': self.ci_rededge,
            '''CI''': self.ci,
            '''CTVI''': self.ctvi,
            '''GDVI''': self.gdvi,
            '''EVI''': self.evi,
            '''GEMI''': self.gemi,
            '''GOSAVI''': self.gosavi,
            '''GSAVI''': self.gsavi,
            '''Hue''': self.hue,
            '''IVI''': self.ivi,
            '''IPVI''': self.ipvi,
            '''I''': self.i,
            '''RVI''': self.rvi,
            '''MRVI''': self.mrvi,
            '''MSAVI''': self.m_savi,
            '''NormG''': self.norm_g,
            '''NormNIR''': self.norm_nir,
            '''NormR''': self.norm_r,
            '''NGRDI''': self.ngrdi,
            '''RI''': self.ri,
            '''S''': self.s,
            '''IF''': self._if,
            '''DVI''': self.dvi,
            '''TVI''': self.tvi,
            '''NDRE''': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('''Index not in the list!''' )
            return False

    def arvaa(self ):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self ):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self ):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self ):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self ):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self ):
        """Blue-band Normalized Difference Vegetation Index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self ):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self ):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self ):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self ):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self ):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self ):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03 ):
        """Adjusted Transformed Soil-Adjusted VI with slope *a*, intercept *b*."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self ):
        """Blue-Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self ):
        """Chlorophyll Index (green band)."""
        return (self.nir / self.green) - 1

    def ci_rededge(self ):
        """Chlorophyll Index (red-edge band)."""
        return (self.nir / self.redEdge) - 1

    def ci(self ):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self ):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def gdvi(self ):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self ):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self ):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16 ):
        """Green Optimized Soil-Adjusted VI with soil factor *y*."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5 ):
        """Green Soil-Adjusted VI with soil factor *n*."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self ):
        """Hue angle of the RGB bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )

    def ivi(self, a=None, b=None ):
        """Ideal Vegetation Index with slope *a* and intercept *b*."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self ):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self ):
        """Intensity of the RGB bands."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self ):
        """Ratio Vegetation Index (nir / red)."""
        return self.nir / self.red

    def mrvi(self ):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self ):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self ):
        """Normalized green band."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self ):
        """Normalized NIR band."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self ):
        """Normalized red band."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self ):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self ):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self ):
        """Saturation of the RGB bands."""
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value

    def _if(self ):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self ):
        """Difference Vegetation Index (ratio form nir / red, as in the source)."""
        return self.nir / self.red

    def tvi(self ):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self ):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 578
|
def method_1(boundary, steps):
    """Approximate the integral of ``f`` over ``boundary=[a, b]`` with the
    trapezoidal rule using ``steps`` subdivisions."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield interior sample points a+h, a+2h, ... while x < b - h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] in 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(F"""y = {y}""" )


if __name__ == "__main__":
    main()
| 578
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin, unittest.TestCase ):
    """Tokenizer test suite for XLNet (slow and fast), driven by TokenizerTesterMixin."""

    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(_lowerCamelCase, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` round-trips through id 1."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1_006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(_lowerCamelCase, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ], )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(_lowerCamelCase, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + '',
                'i',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ], )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['▁he', 'll', 'o'])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(_lowerCamelCase, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ], )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        # XLNet appends <sep>=4 and <cls>=3 at the end.
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # Value-identical to the upstream fmt:off literal; pad/type runs are
        # built with run-length arithmetic (rows are 111 tokens, left-padded
        # with pad id 5; token type 3 on padding, 2 on the final <cls>).
        expected_encoding = {
            'input_ids': [
                [17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3],
                [5] * 79 + [322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3],
                [5] * 97 + [32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3],
            ],
            'token_type_ids': [
                [0] * 110 + [2],
                [3] * 79 + [0] * 31 + [2],
                [3] * 97 + [0] * 13 + [2],
            ],
            'attention_mask': [
                [1] * 111,
                [0] * 79 + [1] * 32,
                [0] * 97 + [1] * 14,
            ],
        }
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='xlnet-base-cased', revision='c841166438c31ec7ca9a106dee7bb312b73ae511', )
| 686
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def a_ ( __lowercase ):
    """Build a DetaConfig (Swin-large backbone) for checkpoint *__lowercase*.

    ``"o365"`` in the name selects the Objects365 label set (366 classes),
    otherwise COCO detection (91 classes).
    """
    model_name = __lowercase
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'], )
    # NOTE(review): these three flags previously received the model-name
    # argument itself; the conversion script enables them — confirm upstream.
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}  # JSON keys arrive as strings
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def a_ ( __lowercase ):
    """Return (old_name, new_name) pairs mapping original DETA checkpoint keys
    to HF DetaForObjectDetection state-dict keys, driven by *__lowercase*
    (the config: backbone depths, encoder/decoder layer counts)."""
    config = __lowercase
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
    rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
    rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        if i < 3:
            # Downsample layers exist between the first three stages only.
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
    rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
    rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
    rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
    rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
    rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
    rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
    # transformer encoder
    for i in range(config.encoder_layers ):
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
    # transformer decoder
    for i in range(config.decoder_layers ):
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
    # fmt: on
    return rename_keys
def a_(dct: dict, old_key: str, new_key: str) -> None:
    """Rename one entry of a state dict in place.

    Pops the tensor stored under ``old_key`` and stores it under ``new_key``.

    The previous definition repeated the same parameter name three times
    (a Python SyntaxError: "duplicate argument in function definition") and
    never wrote the popped value back, so the rename was lost.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def a_(state_dict, backbone_config) -> None:
    """Split the Swin backbone's fused ``qkv`` projections into separate q/k/v entries.

    The original checkpoint stores query/key/value as one stacked matrix per
    attention block; the HF model expects three separate projections. For each
    stage ``i`` and block ``j`` the fused weight/bias is popped and re-added in
    query, key, value order.

    The previous definition had duplicate parameter names (SyntaxError) and had
    lost both its local names (``dim``, ``in_proj_weight`` were never bound) and
    the target key names of the split tensors.
    """
    # Per-stage hidden sizes: Swin doubles the embedding dim at every stage.
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # Fused projection: rows 0..dim are query, dim..2*dim key, last dim rows value.
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # NOTE: target prefixes follow the HF DETA naming
            # (model.backbone.model.encoder.layers...) — verify against modeling_deta.
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def a_(state_dict, config) -> None:
    """Split the decoder self-attention's fused input projection into q/k/v projections.

    For each decoder layer the original checkpoint stores a single
    ``in_proj_weight``/``in_proj_bias`` holding query, key and value stacked
    along dim 0; the HF model expects ``q_proj``/``k_proj``/``v_proj``.

    The previous definition had duplicate parameter names (SyntaxError) and the
    obfuscation had discarded both ``hidden_size`` and the split-tensor targets.
    """
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # Fused projection of the self-attention input: q rows first, then k, then v.
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # Add query, key and value (in that order) to the state dict.
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def a_():
    """Download and return the standard COCO verification image (two cats on a couch).

    The previous body referenced the undefined name ``__lowercase`` instead of
    the url string (and instead of ``stream=True``), raising NameError.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the response body without
    # buffering the whole file first.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
# NOTE(review): the three parameters below all share the name ``__lowercase`` —
# that is a SyntaxError ("duplicate argument in function definition"). The
# obfuscation has also collapsed nearly every assignment target to
# ``_snake_case``, so names read later (``model_name``, ``config``,
# ``state_dict``, ``rename_keys``, ``val``, ``model``, ``processor``,
# ``encoding``, ``outputs`` ...) are never bound. The original local names must
# be restored before this converter can run; the comments below describe the
# intended flow only.
def a_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Tuple ) -> Optional[Any]:
    """Convert an original DETA (Swin-L backbone) checkpoint into the HF format.

    Intended arguments (per the argparse block below): the model name, an
    optional output folder, and a push-to-hub flag.
    """
    _snake_case = get_deta_config(__lowercase )
    # load original state dict from the hub (two released checkpoints only)
    if model_name == "deta-swin-large":
        _snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
    elif model_name == "deta-swin-large-o365":
        _snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    _snake_case = torch.load(__lowercase , map_location='cpu' )['model']
    # debug print of the original state dict layout
    for name, param in state_dict.items():
        print(__lowercase , param.shape )
    # rename keys from the original naming scheme to the HF one
    _snake_case = create_rename_keys(__lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_swin_q_k_v(__lowercase , config.backbone_config )
    read_in_decoder_q_k_v(__lowercase , __lowercase )
    # fix some prefixes (move decoder heads / projections under ``model.``)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            _snake_case = state_dict.pop(__lowercase )
            _snake_case = val
        if "input_proj" in key:
            _snake_case = state_dict.pop(__lowercase )
            _snake_case = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            _snake_case = state_dict.pop(__lowercase )
            _snake_case = val
    # finally, create HuggingFace model and load state dict
    _snake_case = DetaForObjectDetection(__lowercase )
    model.load_state_dict(__lowercase )
    model.eval()
    _snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(__lowercase )
    # load image processor
    _snake_case = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    _snake_case = prepare_img()
    _snake_case = processor(images=__lowercase , return_tensors='pt' )
    _snake_case = encoding['pixel_values']
    _snake_case = model(pixel_values.to(__lowercase ) )
    # verify logits against values recorded from the original implementation
    print('Logits:' , outputs.logits[0, :3, :3] )
    print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        _snake_case = torch.tensor(
            [[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
        _snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
    elif model_name == "deta-swin-large-o365":
        _snake_case = torch.tensor(
            [[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
        _snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
    print('Everything ok!' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(__lowercase ).mkdir(exist_ok=__lowercase )
        model.save_pretrained(__lowercase )
        processor.save_pretrained(__lowercase )
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...' )
        model.push_to_hub(f'''jozhang97/{model_name}''' )
        processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the DETA checkpoint converter. The previous version
    # assigned the parser to a throwaway name (so ``parser`` and ``args`` were
    # undefined) and called the nonexistent ``convert_deta_checkpoint`` — the
    # conversion function above is (obfuscated to) ``a_``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    a_(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 686
| 1
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
    """Histogram-equalization style contrast stretch of a grayscale image (OpenCV/matplotlib).

    NOTE(review): automated renaming has damaged this class. All three methods
    share the name ``lowerCAmelCase__`` (only the last definition survives on the
    class), and every attribute assignment in ``__init__`` and the first method
    binds the throwaway local ``UpperCamelCase__`` instead of ``self.<attr>`` —
    so ``self.img``, ``self.k``, ``self.sk`` etc. that the code reads later are
    never created. The original attribute/method names must be restored.
    """

    def __init__( self ):
        # Presumably: original/output image paths, cumulative list, counters,
        # and the 256-level intensity range (self.L = 256) — TODO confirm
        # against the upstream script.
        UpperCamelCase__ :List[Any] = ''''''
        UpperCamelCase__ :str = ''''''
        UpperCamelCase__ :Tuple = []
        UpperCamelCase__ :Any = 0
        UpperCamelCase__ :Tuple = 256
        UpperCamelCase__ :Optional[Any] = 0
        UpperCamelCase__ :str = 0
        UpperCamelCase__ :Union[str, Any] = 0

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Stretch the image loaded from path ``UpperCamelCase_`` and write output_data/output.jpg.

        NOTE(review): ``np.sum(UpperCamelCase_)`` below sums the *path string*
        argument, not the histogram, and ``x[i]`` reads an unbound name — both
        look like renaming damage. Also ``int(last % last)`` is always 0 when it
        does not raise; presumably a remainder against a different quantity was
        intended — TODO confirm.
        """
        UpperCamelCase__ :int = cva.imread(UpperCamelCase_ , 0 )
        UpperCamelCase__ :Tuple = copy.deepcopy(self.img )
        UpperCamelCase__ :List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
        UpperCamelCase__ :List[Any] = np.sum(UpperCamelCase_ )
        for i in range(len(UpperCamelCase_ ) ):
            UpperCamelCase__ :Any = x[i] / self.k
            self.sk += prk
            UpperCamelCase__ :Optional[Any] = (self.L - 1) * self.sk
            if self.rem != 0:
                UpperCamelCase__ :Union[str, Any] = int(last % last )
            UpperCamelCase__ :int = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(UpperCamelCase_ )
            UpperCamelCase__ :Optional[int] = int(np.ma.count(self.img ) / self.img[1].size )
            UpperCamelCase__ :Dict = self.img[1].size
        # Remap every pixel through the computed lookup list.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                UpperCamelCase__ :List[str] = self.img[j][i]
                if num != self.last_list[num]:
                    UpperCamelCase__ :Union[str, Any] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )

    def lowerCAmelCase__ ( self ):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def lowerCAmelCase__ ( self ):
        """Show input and output images side by side for 5 seconds."""
        cva.imshow('''Output-Image''' , self.img )
        cva.imshow('''Input-Image''' , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): ``ConstantStretch`` does not exist in this file — the class
    # above is (obfuscated to) ``lowercase`` — and the ``stretch`` /
    # ``plot_histogram`` / ``show_image`` methods were all renamed to the same
    # identifier, so this driver cannot run until the class's original names
    # are restored.
    # NOTE(review): ``os.path.basename(__file__)`` yields the script's *file
    # name*, not its directory — presumably ``os.path.dirname`` was intended.
    __snake_case = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    __snake_case = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 721
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( A__ , A__ ):
    """Variance-preserving score-SDE scheduler (diffusers-style; predictor step of the SDE sampler).

    NOTE(review): this class is corrupted by the automated renaming: the third
    method's signature repeats ``UpperCamelCase_`` three times, which is a
    Python SyntaxError ("duplicate argument in function definition"), so the
    module cannot even be imported. In addition ``__init__`` and the second
    method bind locals instead of ``self.sigmas`` / ``self.timesteps``, so the
    ``self.timesteps is None`` guard below would always fire. The original
    parameter/attribute names must be restored.
    """

    # scheduler order (solver order for the sampler loop)
    _a = 1

    @register_to_config
    def __init__( self , UpperCamelCase_=2000 , UpperCamelCase_=0.1 , UpperCamelCase_=20 , UpperCamelCase_=1e-3 ):
        """Config: num_train_timesteps, beta_min, beta_max, sampling_eps (captured by @register_to_config)."""
        # NOTE(review): these should initialize instance attributes
        # (presumably sigmas / discrete_sigmas / timesteps) — the locals below
        # are discarded.
        UpperCamelCase__ :Dict = None
        UpperCamelCase__ :Any = None
        UpperCamelCase__ :List[str] = None

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        """Create the (continuous) timestep grid from 1 down to sampling_eps.

        NOTE(review): the result should be stored on ``self.timesteps``; the
        local assignment below discards it.
        """
        UpperCamelCase__ :Union[str, Any] = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase_ , device=UpperCamelCase_ )

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
        """One reverse-SDE predictor step.

        NOTE(review): duplicate parameter names in this signature are a
        SyntaxError; the body reads ``t``, ``score``, ``x`` and ``generator``,
        so those are presumably the intended parameter names. The scaling and
        drift/diffusion computation below follows the VP-SDE formulation.
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: divide by the marginal std of the VP-SDE at time t
        UpperCamelCase__ :Optional[int] = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        UpperCamelCase__ :Tuple = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        UpperCamelCase__ :Tuple = std.flatten()
        # broadcast std to the sample's rank
        while len(std.shape ) < len(score.shape ):
            UpperCamelCase__ :int = std.unsqueeze(-1 )
        UpperCamelCase__ :List[Any] = -score / std
        # compute the (negative) step size and the linear beta schedule at t
        UpperCamelCase__ :List[str] = -1.0 / len(self.timesteps )
        UpperCamelCase__ :Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        UpperCamelCase__ :Any = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            UpperCamelCase__ :Any = beta_t.unsqueeze(-1 )
        UpperCamelCase__ :Optional[int] = -0.5 * beta_t * x
        UpperCamelCase__ :int = torch.sqrt(UpperCamelCase_ )
        UpperCamelCase__ :List[str] = drift - diffusion**2 * score
        UpperCamelCase__ :Dict = x + drift * dt
        # add noise for the stochastic part of the predictor step
        UpperCamelCase__ :List[Any] = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase_ , device=x.device , dtype=x.dtype )
        UpperCamelCase__ :int = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self ):
        """Number of training timesteps, taken from the registered config."""
        return self.config.num_train_timesteps
| 280
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __magic_name__ ( _lowerCamelCase: list ) -> int:
'''simple docstring'''
if not postfix_notation:
return 0
lowerCAmelCase = {'''+''', '''-''', '''*''', '''/'''}
lowerCAmelCase = []
for token in postfix_notation:
if token in operations:
lowerCAmelCase , lowerCAmelCase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_lowerCamelCase ) )
return stack.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 535
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase ( unittest.TestCase ):
    """Tests for Wav2Vec2ProcessorWithLM (CTC tokenizer + feature extractor + pyctcdecode beam-search decoder).

    NOTE(review): automated renaming has collapsed almost every local and
    attribute assignment in this class to the single name ``lowerCAmelCase``.
    In particular the first method (the original ``setUp``) never actually
    creates ``self.tmpdirname``, ``self.vocab_file``,
    ``self.feature_extraction_file``, ``self.add_kwargs_tokens_map`` or
    ``self.decoder_name`` even though later methods read them, and most test
    bodies read names (``processor``, ``decoder``, ``outputs`` ...) that are
    never bound. Two method signatures below even repeat a parameter name,
    which is a SyntaxError. The original names must be restored before any of
    these tests can run; comments below flag the worst spots.
    """

    def UpperCAmelCase (self : List[Any] ) -> int:
        """Fixture setup (original ``setUp``): write a tiny vocab and feature-extractor config to a temp dir."""
        lowerCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE_ ,range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        lowerCAmelCase = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        lowerCAmelCase = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        lowerCAmelCase = tempfile.mkdtemp()
        lowerCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase = os.path.join(self.tmpdirname ,SCREAMING_SNAKE_CASE_ )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        # load decoder from hub
        lowerCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''

    def UpperCAmelCase (self : List[str] ,**SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
        """Build a CTC tokenizer from the temp-dir vocab, merging any keyword overrides."""
        lowerCAmelCase = self.add_kwargs_tokens_map.copy()
        kwargs.update(SCREAMING_SNAKE_CASE_ )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : str ,**SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
        """Build a feature extractor from the temp-dir config."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : Union[str, Any] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
        """Load the n-gram beam-search decoder fixture from the hub."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : Dict ) -> List[str]:
        """Fixture teardown (original ``tearDown``): delete the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def UpperCAmelCase (self : Optional[int] ) -> int:
        """save_pretrained/from_pretrained round-trips tokenizer, feature extractor and decoder."""
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,SCREAMING_SNAKE_CASE_ )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,SCREAMING_SNAKE_CASE_ )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
        self.assertIsInstance(processor.decoder ,SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : str ) -> List[str]:
        """from_pretrained forwards extra decoder kwargs (alpha/beta/boundary/offset) to the language model."""
        lowerCAmelCase = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha ,5.0 )
        self.assertEqual(processor.language_model.beta ,3.0 )
        self.assertEqual(processor.language_model.score_boundary ,-7.0 )
        self.assertEqual(processor.language_model.unk_score_offset ,3 )

    def UpperCAmelCase (self : List[Any] ) -> int:
        """Constructing the processor raises when the tokenizer vocab is not covered by the decoder alphabet."""
        lowerCAmelCase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ ,'''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )

    def UpperCAmelCase (self : Dict ) -> str:
        """Calling the processor on audio matches calling the feature extractor directly."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = floats_list((3, 1_000) )
        lowerCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE_ ,return_tensors='''np''' )
        lowerCAmelCase = processor(SCREAMING_SNAKE_CASE_ ,return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )

    def UpperCAmelCase (self : str ) -> Optional[Any]:
        """Calling the processor on text matches calling the tokenizer directly."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = '''This is a test string'''
        lowerCAmelCase = processor(text=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    # NOTE(review): the signature below repeats ``SCREAMING_SNAKE_CASE_`` —
    # a SyntaxError. The originals were presumably ``shape=(2, 10, 16)`` and
    # ``seed=77`` (the names used at the call sites above/below).
    def UpperCAmelCase (self : Tuple ,SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) ,SCREAMING_SNAKE_CASE_ : List[Any]=77 ) -> Optional[int]:
        """Deterministic random logits for decoding tests."""
        np.random.seed(SCREAMING_SNAKE_CASE_ )
        return np.random.rand(*SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : int ) -> List[str]:
        """processor.decode matches pyctcdecode's decode_beams on the same logits."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
        lowerCAmelCase = processor.decode(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
        self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )

    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def UpperCAmelCase (self : int ,SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
        """batch_decode matches decode_beams_batch, with and without an explicit multiprocessing pool."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            lowerCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
        else:
            with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
                lowerCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = list(SCREAMING_SNAKE_CASE_ )
        with get_context('''fork''' ).Pool() as p:
            lowerCAmelCase = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        # NOTE(review): the tuple-unpacking below binds all three targets to
        # one name, so ``texts_decoder`` etc. used right after are unbound.
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,decoded_processor.logit_score )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,decoded_processor.lm_score )

    def UpperCAmelCase (self : Union[str, Any] ) -> Optional[Any]:
        """batch_decode honors beam_width / beam_prune_logp / token_min_logp decoding parameters."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = 15
        lowerCAmelCase = -20.0
        lowerCAmelCase = -4.0
        lowerCAmelCase = processor.batch_decode(
            SCREAMING_SNAKE_CASE_ ,beam_width=SCREAMING_SNAKE_CASE_ ,beam_prune_logp=SCREAMING_SNAKE_CASE_ ,token_min_logp=SCREAMING_SNAKE_CASE_ ,)
        lowerCAmelCase = decoded_processor_out.text
        lowerCAmelCase = list(SCREAMING_SNAKE_CASE_ )
        with get_context('''fork''' ).Pool() as pool:
            lowerCAmelCase = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,beam_width=SCREAMING_SNAKE_CASE_ ,beam_prune_logp=SCREAMING_SNAKE_CASE_ ,token_min_logp=SCREAMING_SNAKE_CASE_ ,)
        lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
        lowerCAmelCase = [d[0][2] for d in decoded_decoder_out]
        lowerCAmelCase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,SCREAMING_SNAKE_CASE_ )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ ,decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,SCREAMING_SNAKE_CASE_ ,atol=1e-3 ) )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ ,decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,SCREAMING_SNAKE_CASE_ ,atol=1e-3 ) )

    def UpperCAmelCase (self : List[str] ) -> Any:
        """batch_decode forwards LM parameters (alpha/beta/unk_score_offset/lm_score_boundary) to pyctcdecode."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = 2.0
        lowerCAmelCase = 5.0
        lowerCAmelCase = -20.0
        lowerCAmelCase = True
        lowerCAmelCase = processor.batch_decode(
            SCREAMING_SNAKE_CASE_ ,alpha=SCREAMING_SNAKE_CASE_ ,beta=SCREAMING_SNAKE_CASE_ ,unk_score_offset=SCREAMING_SNAKE_CASE_ ,lm_score_boundary=SCREAMING_SNAKE_CASE_ ,)
        lowerCAmelCase = decoded_processor_out.text
        lowerCAmelCase = list(SCREAMING_SNAKE_CASE_ )
        decoder.reset_params(
            alpha=SCREAMING_SNAKE_CASE_ ,beta=SCREAMING_SNAKE_CASE_ ,unk_score_offset=SCREAMING_SNAKE_CASE_ ,lm_score_boundary=SCREAMING_SNAKE_CASE_ ,)
        with get_context('''fork''' ).Pool() as pool:
            lowerCAmelCase = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,)
        lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha ,2.0 )
        self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset ,-20.0 )
        self.assertEqual(lm_model.score_boundary ,SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : Dict ) -> str:
        """from_pretrained downloads only the decoder-relevant files from the hub repo."""
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : Any ) -> Dict:
        """from_pretrained on a local snapshot finds the same decoder files as the hub download."""
        lowerCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = os.listdir(SCREAMING_SNAKE_CASE_ )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase (self : Any ) -> int:
        """AutoProcessor resolves to the same processor as the explicit class."""
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = floats_list((3, 1_000) )
        lowerCAmelCase = processor_wavaveca(SCREAMING_SNAKE_CASE_ ,return_tensors='''np''' )
        lowerCAmelCase = processor_auto(SCREAMING_SNAKE_CASE_ ,return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )

    def UpperCAmelCase (self : List[str] ) -> Any:
        """Processor exposes the feature extractor's model_input_names."""
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,decoder=SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)

    # NOTE(review): duplicate parameter name below — SyntaxError. The body
    # reads ``offsets`` and ``key``, so those were presumably the original
    # parameter names of this helper.
    @staticmethod
    def UpperCAmelCase (SCREAMING_SNAKE_CASE_ : Tuple ,SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
        """Collect one field from a list of offset dicts."""
        lowerCAmelCase = [d[key] for d in offsets]
        return retrieved_list

    def UpperCAmelCase (self : Optional[Any] ) -> Any:
        """decode(output_word_offsets=True) returns per-word text plus start/end frame offsets."""
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = self._get_dummy_logits()[0]
        lowerCAmelCase = processor.decode(SCREAMING_SNAKE_CASE_ ,output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )

    def UpperCAmelCase (self : List[Any] ) -> List[str]:
        """batch_decode(output_word_offsets=True) returns word offsets for every batch element."""
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ ,output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def UpperCAmelCase (self : Tuple ) -> Any:
        """Integration test: word offsets on a real Common Voice sample convert to plausible word times."""
        import torch
        lowerCAmelCase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000 ) )
        lowerCAmelCase = iter(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = next(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        lowerCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        lowerCAmelCase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
        with torch.no_grad():
            lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
        lowerCAmelCase = processor.decode(logits[0] ,output_word_offsets=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        lowerCAmelCase = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        lowerCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ ,'''word''' ) ) ,SCREAMING_SNAKE_CASE_ )
        self.assertEqual(''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ ,'''word''' ) ) ,output.text )
        # output times
        lowerCAmelCase = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ ,'''start_time''' ) )
        lowerCAmelCase = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ ,'''end_time''' ) )
        # fmt: off
        lowerCAmelCase = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
        lowerCAmelCase = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=0.01 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,atol=0.01 ) )
| 535
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the BLIP sub-package, following the standard
# transformers pattern. The previous version had renamed ``_import_structure``
# (and the conditional additions, and the final ``sys.modules`` assignment) to
# a single throwaway name: the conditional torch/TF entries silently
# *replaced* the dict instead of extending it, and the ``_LazyModule(...)``
# call raised NameError on the undefined ``_import_structure``.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first *length* hexagonal numbers h(n) = n * (2n - 1), starting at n = 0.

    Raises:
        ValueError: if *length* is not a positive integer.
    """
    # Check the type first: comparing a non-int with `<=` would raise TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 699
| 1
|
import pytest

import datasets

# Import fixture modules as plugins; pytest only honors the magic module-level
# name `pytest_plugins` (the list was previously bound to a throwaway variable).
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    """Mark every collected test that has no `integration`/`unit` marker as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    """Register the custom markers used by the test suite."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every `datasets` cache directory into the pytest temp tree for the duration of a test."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence the `datasets` progress bars for the whole test session."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't let test runs inflate the Hub download counters.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Suppress SQLAlchemy's RemovedIn20Warning noise for features not yet 2.0-compatible.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 35
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a : Any = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias kept for backward compatibility; use `DeiTImageProcessor` instead."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation with an explicit category; the original (obfuscated)
        # code had duplicate parameter names, which is a SyntaxError.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 637
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


# Submodule -> public names mapping consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch models instead of clobbering the structure dict.
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names mapping consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82
| 1
|
"""Lazy import structure for the TimeSformer model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 399
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Element-wise logistic sigmoid over a numpy array of logits."""
    # The body referenced `_outputs` while the (obfuscated) parameter had a
    # different name, which raised NameError; parameter name restored.
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    """Numerically stable softmax over the last axis of a numpy array of logits."""
    # Subtract the row max before exponentiating to avoid overflow.
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    """Post-processing functions a text-classification pipeline can apply to logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Sequence-classification pipeline: tokenizes text, runs the model, and maps logits to labeled scores."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the text(s) given as inputs; returns label/score dicts."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            # Fixed: the sort key lambda previously referenced an undefined name.
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 399
| 1
|
"""Lazy import structure for the TAPAS model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
"""Lazy import structure for the Time Series Transformer model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118
| 0
|
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds small SqueezeBERT configs/inputs and runs shape checks for each task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Config and model unit tests for SqueezeBERT, driven by SqueezeBertModelTester."""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): these flag names were obfuscated in the original; restored
    # from the upstream SqueezeBERT test module — confirm against ModelTesterMixin.
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained squeezebert-mnli checkpoint."""

    @slow
    def test_sentence_classification(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 688
|
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    """Checks that diffusers imports and that every dummy-object backend is in the deps table."""

    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map import names to the PyPI distribution names used in the deps table.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 1
|
"""Print Python / transformers / Torch / DeepSpeed / TensorFlow version info for bug reports."""
import os
import sys

import transformers


# Silence TensorFlow's C++ logging before TF is (possibly) imported below.
# NOTE(review): the original assigned "3" to a throwaway variable and never
# used `os`; this env-var assignment is the presumed intent — confirm.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 705
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _snake_case ( _a ):
_A : int = ['''pixel_values''']
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = size if size is not None else {"shortest_edge": 224}
SCREAMING_SNAKE_CASE:int = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE:str = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ ,param_name="crop_size" )
SCREAMING_SNAKE_CASE:List[Any] = do_resize
SCREAMING_SNAKE_CASE:Optional[int] = size
SCREAMING_SNAKE_CASE:List[Any] = resample
SCREAMING_SNAKE_CASE:Any = do_center_crop
SCREAMING_SNAKE_CASE:List[Any] = crop_size
SCREAMING_SNAKE_CASE:Tuple = do_rescale
SCREAMING_SNAKE_CASE:Optional[Any] = rescale_factor
SCREAMING_SNAKE_CASE:Dict = do_normalize
SCREAMING_SNAKE_CASE:int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE:Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE:Optional[int] = do_convert_rgb
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Any ,):
SCREAMING_SNAKE_CASE:List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE:Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=size["shortest_edge"] ,default_to_square=SCREAMING_SNAKE_CASE__ )
return resize(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def center_crop(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
):
    """Center-crop ``image`` to ``size["height"] x size["width"]``.

    Parameter names restored (the obfuscated signature used one duplicate
    name, a SyntaxError); the method name matches ``preprocess``'s call site.

    Raises:
        ValueError: if ``size`` lacks ``height`` or ``width``.
    """
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
    return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(
    self,
    image: np.ndarray,
    scale: Union[int, float],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
):
    """Multiply pixel values by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
    # Signature restored: the obfuscated original duplicated one parameter name.
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, List[float]],
    std: Union[float, List[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
):
    """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
    # Signature restored: the obfuscated original duplicated one parameter name.
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: int = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    do_convert_rgb: bool = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    **kwargs,
):
    """Run the full CLIP preprocessing pipeline over one image or a batch.

    Each step (RGB conversion, resize, center crop, rescale, normalize) is
    applied only when its flag is enabled; every argument defaults to the
    value stored at construction time. Parameter names were restored — the
    obfuscated signature reused one name for all of them (a SyntaxError).

    Returns:
        BatchFeature with a ``pixel_values`` entry, optionally converted to
        the requested tensor framework.

    Raises:
        ValueError: on invalid image types or missing step configuration.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, param_name="size", default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray.")
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 465
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 164
|
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
    """Fixed-capacity circular queue over a doubly linked ring of ``Node``s.

    NOTE(review): the obfuscation pass destroyed most assignment targets in
    this class — values that should be bound to ``self.front``, ``self.rear``
    and node links are bound to throwaway locals named ``lowercase__`` — and
    every method was renamed to ``snake_case_`` so self-calls such as
    ``self.create_linked_list`` / ``self.is_empty`` resolve to nothing.
    Reconstruct against the intended implementation before use.
    """

    def __init__( self , a = 6):
        # ``a`` is the fixed capacity of the ring.
        lowercase__ : Node | None = None  # presumably ``self.front`` — TODO confirm
        lowercase__ : Node | None = None  # presumably ``self.rear`` — TODO confirm
        self.create_linked_list(a)

    def snake_case_ ( self , a):
        """Build a circular, doubly linked ring of ``a`` empty nodes."""
        lowercase__ : str = Node()
        lowercase__ : str = current_node  # NOTE(review): ``current_node`` is undefined here
        lowercase__ : str = current_node
        lowercase__ : Any = current_node
        for _ in range(1 , a):
            lowercase__ : List[str] = Node()
            lowercase__ : Optional[Any] = current_node
            lowercase__ : Union[str, Any] = previous_node  # NOTE(review): ``previous_node`` is undefined
            lowercase__ : List[str] = current_node
        lowercase__ : Optional[int] = self.front
        lowercase__ : str = previous_node

    def snake_case_ ( self):
        """True when the ring exists but its front sentinel holds no payload."""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def snake_case_ ( self):
        """Peek at the front payload without removing it."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def snake_case_ ( self , a):
        """Enqueue ``a`` at the rear slot of the ring."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            lowercase__ : Dict = self.rear.next  # presumably advances ``self.rear`` — TODO confirm
        if self.rear:
            lowercase__ : Any = data  # NOTE(review): ``data`` is undefined; should store ``a``

    def snake_case_ ( self):
        """Dequeue and return the front payload."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            lowercase__ : Any = self.front.data
            lowercase__ : Optional[Any] = None
            return data  # NOTE(review): ``data`` is undefined (its assignment target was lost)
        lowercase__ : Optional[Any] = self.front
        lowercase__ : str = old_front.next  # NOTE(review): ``old_front`` is undefined
        lowercase__ : Union[str, Any] = old_front.data
        lowercase__ : List[str] = None
        return data

    def snake_case_ ( self):
        """Raise if the queue is empty."""
        if self.is_empty():
            raise Exception('Empty Queue')

    def snake_case_ ( self):
        """Raise if the queue is full (rear's successor has wrapped to front)."""
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')
class Node:
    """One element of the circular queue's doubly linked ring.

    Restored name: the obfuscated original reused ``SCREAMING_SNAKE_CASE__``,
    shadowing the queue class above and leaving ``Node`` — which the queue's
    annotations and ``Node()`` calls reference — undefined.
    """

    def __init__(self) -> None:
        # Payload slot; ``None`` marks an empty ring position.
        self.data: Any | None = None
        # Neighbouring ring nodes (the queue reads ``.next`` when advancing).
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 164
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_A : Optional[Any] = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config.json URL.
# NOTE(review): this rebinds ``_A``, so the logger above becomes unreachable
# under that name — the obfuscation pass collapsed two distinct globals.
_A : str = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Configuration for UniSpeech models.

    The obfuscated original named every ``__init__`` parameter ``A`` — a
    SyntaxError — and bound all attributes through mangled locals. Parameter
    names are restored from the upstream ``UniSpeechConfig``; the restored
    names map one-for-one onto the original default values.
    """

    # Restored from the mangled ``_UpperCAmelCase`` class attribute.
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder geometry.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The three convolutional specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the feature extractor: product of all conv strides."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 718
|
import math
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[Any] = 0
while num > 0:
lowerCamelCase__ : Tuple = num % 8
lowerCamelCase__ : List[str] = octal + (remainder * math.floor(math.pow(10 , UpperCAmelCase ) ))
counter += 1
lowerCamelCase__ : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"0o{int(UpperCAmelCase )}"
def main() -> None:
    """Demonstrate the decimal-to-octal converter on a few sample values.

    Restored name: the obfuscated original was a second ``def _a`` that
    shadowed the converter above, while the ``__main__`` guard called an
    undefined ``main``; the conversions now call the sibling ``_a`` directly.
    """
    print('''\n2 in octal is:''' )
    print(_a(2 ) ) # = 2
    print('''\n8 in octal is:''' )
    print(_a(8 ) ) # = 10
    print('''\n65 in octal is:''' )
    print(_a(65 ) ) # = 101
    print('''\n216 in octal is:''' )
    print(_a(216 ) ) # = 330
    print('''\n512 in octal is:''' )
    print(_a(512 ) ) # = 1000
    print('''\n''' )
if __name__ == "__main__":
    main()
| 130
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCAmelCase_ :
    """A binary-tree node: an int payload and optional left/right children.

    Field names restored — the obfuscated original declared all three fields
    under one mangled name, so the ``.data``/``.left``/``.right`` attributes
    read throughout this module never existed.
    """
    data: int
    left: Node | None = None
    right: Node | None = None


# The traversal helpers below refer to this class as ``Node``; restore that name.
Node = lowerCAmelCase_
def lowercase_ ( ):
    """Build the fixed sample tree used by the demos: 1 -> (2 -> (4, 5), 3).

    Fixes the obfuscated original, which created five unlinked nodes and then
    returned an undefined name ``tree``.
    """
    root = Node(1 )
    root.left = Node(2 )
    root.right = Node(3 )
    root.left.left = Node(4 )
    root.left.right = Node(5 )
    return root
def lowercase_ ( _snake_case ):
    """Pre-order traversal (root, left, right); returns [] for an empty tree.

    Fixed: the original recursed through an undefined name ``preorder``; the
    recursion now targets the function's actual name.
    """
    return [_snake_case.data, *lowercase_(_snake_case.left ), *lowercase_(_snake_case.right )] if _snake_case else []
def lowercase_ ( _snake_case ):
    """Post-order traversal (left, right, root); returns [] for an empty tree.

    Fixed: the original recursed through an undefined name ``postorder``.
    """
    return lowercase_(_snake_case.left ) + lowercase_(_snake_case.right ) + [_snake_case.data] if _snake_case else []
def lowercase_ ( _snake_case ):
    """In-order traversal (left, root, right); returns [] for an empty tree.

    Fixed: the original recursed through an undefined name ``inorder``.
    """
    return [*lowercase_(_snake_case.left ), _snake_case.data, *lowercase_(_snake_case.right )] if _snake_case else []
def lowercase_ ( _snake_case ):
    """Height of the tree in nodes along the longest path; 0 for an empty tree.

    Fixed: the original recursed through an undefined name ``height``.
    """
    return (max(lowercase_(_snake_case.left ), lowercase_(_snake_case.right ) ) + 1) if _snake_case else 0
def lowercase_ ( _snake_case ):
    """Breadth-first (level-order) traversal; returns payloads top to bottom,
    left to right, or [] for an empty tree."""
    if _snake_case is None:
        return []
    collected: list[Any] = []
    pending = deque([_snake_case] )
    while pending:
        current = pending.popleft()
        collected.append(current.data )
        # Children are enqueued left-first so each level reads left to right.
        if current.left:
            pending.append(current.left )
        if current.right:
            pending.append(current.right )
    return collected
def lowercase_ ( root ,level ):
    """Return the payloads of every node at ``level`` (1-based), left to right.

    Parameters restored as (root, level): the obfuscated original reused one
    name for both parameters of the outer and inner functions (a SyntaxError).
    """
    output: list[Any] = []

    def populate_output(node ,depth ) -> None:
        # Walk down ``depth - 1`` levels, collecting payloads at depth 1.
        if not node:
            return
        if depth == 1:
            output.append(node.data )
        elif depth > 1:
            populate_output(node.left ,depth - 1 )
            populate_output(node.right ,depth - 1 )

    populate_output(root ,level )
    return output
def lowercase_ ( root ,level ):
    """Return the payloads of every node at ``level`` (1-based), right to left.

    Parameters restored as (root, level): the obfuscated original reused one
    name for both parameters of the outer and inner functions (a SyntaxError).
    """
    output: list[Any] = []

    def populate_output(node ,depth ) -> None:
        if node is None:
            return
        if depth == 1:
            output.append(node.data )
        elif depth > 1:
            # Right child first so the level reads right to left.
            populate_output(node.right ,depth - 1 )
            populate_output(node.left ,depth - 1 )

    populate_output(root ,level )
    return output
def lowercase_ ( _snake_case ):
    """Zig-zag (spiral) level-order traversal: alternate left-to-right and
    right-to-left per level, returning one list per level.

    Fixes two obfuscation defects: the body read an undefined name ``root``
    instead of its parameter, and passed the root twice where the current
    level ``h`` was expected.

    NOTE(review): ``height``, ``get_nodes_from_left_to_right`` and
    ``get_nodes_from_right_to_left`` are undefined in this file — those
    helpers were all renamed to ``lowercase_`` by the obfuscation pass.
    """
    if _snake_case is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(_snake_case )
    for h in range(1 ,height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(_snake_case ,h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(_snake_case ,h ) )
            flag = 0
    return output
def lowercase_ ( ): # Main function for testing.
    """Demo driver printing every traversal of the sample tree.

    NOTE(review): every helper referenced below (``make_tree``, ``inorder``,
    ``preorder``, ``postorder``, ``height``, ``level_order``,
    ``get_nodes_from_left_to_right``, ``zigzag``) was renamed to
    ``lowercase_`` by the obfuscation pass, the tree bound here is read back
    through an undefined ``_snake_case``, and the guard calls an undefined
    ``main`` — this block raises NameError as written.
    """
    SCREAMING_SNAKE_CASE__ : Dict = make_tree()
    print(f'''In-order Traversal: {inorder(_snake_case )}''' )
    print(f'''Pre-order Traversal: {preorder(_snake_case )}''' )
    print(f'''Post-order Traversal: {postorder(_snake_case )}''' ,"""\n""" )
    print(f'''Height of Tree: {height(_snake_case )}''' ,"""\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(_snake_case ) ,"""\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 ,height(_snake_case ) + 1 ):
        print(f'''Level {level}:''' ,get_nodes_from_left_to_right(_snake_case ,level=_snake_case ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(_snake_case ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 223
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase__ : Optional[Any] = {'UserAgent': UserAgent().random}
def lowercase_ ( _snake_case ):
    """Extract the embedded shared-data JSON from an Instagram <script> tag
    and return the profile's user dict.

    Fixed: the obfuscated body read an undefined name ``script`` instead of
    its parameter.
    """
    raw = _snake_case.contents[0]
    # The JSON object starts at '{"config"'; the trailing ';' is stripped.
    info = json.loads(raw[raw.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCAmelCase_ :
    """Read-only wrapper over the public data of an Instagram profile page.

    The page is fetched once at construction; all properties read from the
    cached ``user_data`` dict. Restorations: the obfuscated ``__init__``
    dropped its ``self.`` targets (so no state was ever stored) and read an
    undefined ``username``; all properties were named ``__magic_name__`` and
    shadowed each other — their names are restored from the attribute reads
    in this module's own smoke test (``fullname``, ``number_of_posts``, ...).
    """

    def __init__(self , SCREAMING_SNAKE_CASE__ ) -> None:
        # ``SCREAMING_SNAKE_CASE__`` is the profile's handle.
        self.username = SCREAMING_SNAKE_CASE__
        self.url = F'''https://www.instagram.com/{SCREAMING_SNAKE_CASE__}/'''
        self.user_data = self.get_json()

    def get_json(self ) -> dict:
        """Fetch the profile page and return the embedded user dict."""
        # ``UpperCAmelCase__`` is the module-level fake-user-agent header dict.
        html = requests.get(self.url , headers=UpperCAmelCase__ ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        try:
            # NOTE(review): the extractor function is named ``lowercase_`` in
            # this file, so ``extract_user_profile`` is undefined — confirm.
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__(self ) -> str:
        return F'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__(self ) -> str:
        return F'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username(self ) -> str:
        return self.user_data["username"]

    @property
    def fullname(self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self ) -> str:
        return self.user_data["biography"]

    @property
    def email(self ) -> str:
        return self.user_data["business_email"]

    @property
    def website(self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self ) -> bool:
        return self.user_data["is_private"]
def lowercase_ ( _snake_case = "github" ):
    """Live smoke test of the scraper against the public ``github`` profile.

    NOTE(review): ``InstagramUser`` is undefined here (the class was renamed
    to ``lowerCAmelCase_``) and the constructed instance is bound to a
    throwaway name while the asserts read an undefined ``instagram_user`` —
    this raises NameError as written. It also performs live network I/O.
    """
    import os

    # Skipped on CI to avoid flaky network-dependent assertions.
    if os.environ.get("""CI""" ):
        return # test failing on GitHub Actions
    SCREAMING_SNAKE_CASE__ : List[Any] = InstagramUser(_snake_case )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data ,_snake_case )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``InstagramUser`` is undefined in this file (the class was
    # renamed to ``lowerCAmelCase_``), so this demo raises NameError as written.
    UpperCAmelCase__ : List[str] = InstagramUser('github')
    print(instagram_user)
    print(f"""{instagram_user.number_of_posts = }""")
    print(f"""{instagram_user.number_of_followers = }""")
    print(f"""{instagram_user.number_of_followings = }""")
    print(f"""{instagram_user.email = }""")
    print(f"""{instagram_user.website = }""")
    print(f"""{instagram_user.profile_picture_url = }""")
    print(f"""{instagram_user.is_verified = }""")
    print(f"""{instagram_user.is_private = }""")
| 223
| 1
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__A = logging.getLogger(__name__)
class __lowerCAmelCase ( __A ):
    """PyTorch-Lightning module that fine-tunes a seq2seq model for summarization.

    NOTE(review): the obfuscation pass bound all four class attributes to the
    same name ``snake_case_`` so only the last survived; names are restored to
    the attributes the methods below actually read (``self.mode``,
    ``self.loss_names``, ``self.metric_names``, ``self.default_val_metric``).
    """

    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
def __init__( self , hparams , **kwargs ) -> None:
    """Set up datasets, target lengths, freezing and generation settings.

    Signature restored — the obfuscated original reused one name for the
    positional and keyword-catchall parameters (a SyntaxError). The body is
    reconstructed following upstream ``examples/seq2seq/finetune.py``; the
    literal ``hparams.`` reads the obfuscator left behind ground the
    parameter name.
    """
    if hparams.sortish_sampler and hparams.gpus > 1:
        hparams.replace_sampler_ddp = False
    elif hparams.max_tokens_per_batch is not None:
        if hparams.gpus > 1:
            raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
        if hparams.sortish_sampler:
            raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
    super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs )
    use_task_specific_params(self.model , 'summarization' )
    save_git_info(self.hparams.output_dir )
    self.metrics_save_path = Path(self.output_dir ) / 'metrics.json'
    self.hparams_save_path = Path(self.output_dir ) / 'hparams.pkl'
    pickle_save(self.hparams , self.hparams_save_path )
    self.step_count = 0
    self.metrics = defaultdict(list )
    self.model_type = self.config.model_type
    self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
    self.dataset_kwargs = {
        "data_dir": self.hparams.data_dir,
        "max_source_length": self.hparams.max_source_length,
        "prefix": self.model.config.prefix or "",
    }
    n_observations_per_split = {
        'train': self.hparams.n_train,
        'val': self.hparams.n_val,
        'test': self.hparams.n_test,
    }
    # Negative counts mean "use the whole split".
    self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
    self.target_lens = {
        'train': self.hparams.max_target_length,
        'val': self.hparams.val_max_target_length,
        'test': self.hparams.test_max_target_length,
    }
    assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
    assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
    if self.hparams.freeze_embeds:
        freeze_embeds(self.model )
    if self.hparams.freeze_encoder:
        freeze_params(self.model.get_encoder() )
        assert_all_frozen(self.model.get_encoder() )
    self.hparams.git_sha = get_git_info()['repo_sha']
    self.num_workers = hparams.num_workers
    self.decoder_start_token_id = None # default to config
    if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer ):
        self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
        self.model.config.decoder_start_token_id = self.decoder_start_token_id
    self.dataset_class = (
        SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
    )
    self.already_saved_batch = False
    self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
    if self.hparams.eval_max_gen_length is not None:
        self.eval_max_length = self.hparams.eval_max_gen_length
    else:
        self.eval_max_length = self.model.config.max_length
    self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
    """Dump one batch to JSON in detokenized and raw-token form for inspection.

    NOTE(review): obfuscation mangled this body — the dict comprehension and
    the ``True`` flag lost their targets (presumably ``readable_batch`` and
    ``self.already_saved_batch``), and ``batch`` / ``readable_batch`` are read
    while undefined (the parameter is ``lowerCamelCase__``). Confirm against
    upstream before running.
    """
    __lowerCamelCase = {
        k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
    }
    save_json(lowerCamelCase__ , Path(self.output_dir ) / 'text_batch.json' )
    save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
    __lowerCamelCase = True
    return readable_batch
def lowercase_ ( self , input_ids , **kwargs ):
    """Forward pass delegated to the wrapped Hugging Face model.

    Signature restored — the obfuscated original reused one name for the
    positional and keyword-catchall parameters (a SyntaxError).
    """
    return self.model(input_ids , **kwargs )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
    """Decode a tensor of token ids into whitespace-stripped strings.

    NOTE(review): the decode keywords receive the id batch itself (upstream
    passes ``True`` for both flags), the decoded result is bound to a
    throwaway name, and the final ``lmap`` strips the *input* rather than the
    decoded text — the arguments were mangled by obfuscation; confirm.
    """
    __lowerCamelCase = self.tokenizer.batch_decode(
        lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
    return lmap(str.strip , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
    """Compute the teacher-forced seq2seq loss for one batch.

    NOTE(review): obfuscation destroyed most assignment targets here —
    ``pad_token_id``, the (src_ids, src_mask) tuple unpack, ``tgt_ids``,
    ``decoder_input_ids``, ``outputs``, ``lm_logits``, ``ce_loss_fct``,
    ``lprobs`` and ``loss`` all bind one throwaway name, and several of those
    names are then read while undefined (``batch``, ``decoder_input_ids``,
    ``outputs``, ``lm_logits``, ``tgt_ids``, ``ce_loss_fct``, ``loss``).
    Reconstruct from upstream ``finetune.py`` before running.
    """
    __lowerCamelCase = self.tokenizer.pad_token_id
    __lowerCamelCase = batch['input_ids'], batch['attention_mask']
    __lowerCamelCase = batch['labels']
    # T5-style models shift labels themselves; BART-style use shift_tokens_right.
    if isinstance(self.model , lowerCamelCase__ ):
        __lowerCamelCase = self.model._shift_right(lowerCamelCase__ )
    else:
        __lowerCamelCase = shift_tokens_right(lowerCamelCase__ , lowerCamelCase__ )
    if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
        __lowerCamelCase = decoder_input_ids
        self.save_readable_batch(lowerCamelCase__ )
    __lowerCamelCase = self(lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , use_cache=lowerCamelCase__ )
    __lowerCamelCase = outputs['logits']
    if self.hparams.label_smoothing == 0:
        # Same behavior as modeling_bart.py, besides ignoring pad_token_id
        __lowerCamelCase = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ )
        assert lm_logits.shape[-1] == self.vocab_size
        __lowerCamelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
    else:
        __lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
        __lowerCamelCase = label_smoothed_nll_loss(
            lowerCamelCase__ , lowerCamelCase__ , self.hparams.label_smoothing , ignore_index=lowerCamelCase__ )
    return (loss,)
@property
def lowercase_ ( self ) -> Optional[int]:
    """Id of the tokenizer's padding token."""
    pad_token = self.tokenizer.pad_token_id
    return pad_token
def lowercase_ ( self , batch , batch_idx ) -> Dict:
    """One optimization step: loss plus padding/token statistics for logging.

    Parameters restored as (batch, batch_idx) — the obfuscated signature
    reused one name for both (a SyntaxError). Log-entry names follow the
    upstream ``finetune.py``.

    NOTE(review): sibling methods were all renamed to ``lowercase_`` here, so
    ``self._step`` / ``self.pad`` must be confirmed against the restored names.
    """
    loss_tensors = self._step(batch )
    logs = dict(zip(self.loss_names , loss_tensors ) )
    # tokens per batch
    logs['tpb'] = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
    logs['bs'] = batch['input_ids'].shape[0]
    logs['src_pad_tok'] = batch['input_ids'].eq(self.pad ).sum()
    logs['src_pad_frac'] = batch['input_ids'].eq(self.pad ).float().mean()
    # TODO(SS): make a wandb summary metric for this
    return {"loss": loss_tensors[0], "log": logs}
def lowercase_ ( self , batch , batch_idx ):
    """Run a generative evaluation step on one validation batch.

    Parameters restored as (batch, batch_idx) — the obfuscated signature
    reused one name for both (a SyntaxError).
    """
    return self._generative_step(batch )
def lowercase_ ( self , outputs , prefix="val" ):
    """Aggregate per-step outputs into averaged losses and metrics for an epoch.

    Parameters restored as (outputs, prefix) — the obfuscated signature reused
    one name for both (a SyntaxError) — and the mangled assignment targets
    are reconstructed following upstream ``finetune.py``.
    """
    self.step_count += 1
    losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
    loss = losses['loss']
    generative_metrics = {
        k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
    }
    # Prefer the generative metric when available, otherwise fall back to a loss.
    metric_val = (
        generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
    )
    metric_tensor = torch.tensor(metric_val ).type_as(loss )
    generative_metrics.update({k: v.item() for k, v in losses.items()} )
    losses.update(generative_metrics )
    all_metrics = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
    all_metrics['step_count'] = self.step_count
    self.metrics[prefix].append(all_metrics ) # callback writes this to self.metrics_save_path
    preds = flatten_list([x['preds'] for x in outputs] )
    return {
        "log": all_metrics,
        "preds": preds,
        f"""{prefix}_loss""": loss,
        f"""{prefix}_{self.val_metric}""": metric_tensor,
    }
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
        '''Compute ROUGE metrics between generated predictions and targets.'''
        return calculate_rouge(lowerCamelCase__ , lowerCamelCase__ )
    def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
        '''Generate for one batch and compute loss plus generative metrics
        (generation time per example, summary length, ROUGE/BLEU).'''
        # NOTE(review): `batch`, `ta` and the names consumed at the end were lost
        # to mechanical renaming (all intermediates rebind __lowerCamelCase).
        # TODO confirm against upstream _generative_step.
        __lowerCamelCase = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        __lowerCamelCase = self.model.generate(
            batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowerCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        # seconds of generation time per example in the batch
        __lowerCamelCase = (time.time() - ta) / batch['input_ids'].shape[0]
        __lowerCamelCase = self.ids_to_clean_text(lowerCamelCase__ )
        __lowerCamelCase = self.ids_to_clean_text(batch['labels'] )
        __lowerCamelCase = self._step(lowerCamelCase__ )
        __lowerCamelCase = dict(zip(self.loss_names , lowerCamelCase__ ) )
        __lowerCamelCase = self.calc_generative_metrics(lowerCamelCase__ , lowerCamelCase__ )
        __lowerCamelCase = np.mean(lmap(lowerCamelCase__ , lowerCamelCase__ ) )
        base_metrics.update(gen_time=lowerCamelCase__ , gen_len=lowerCamelCase__ , preds=lowerCamelCase__ , target=lowerCamelCase__ , **lowerCamelCase__ )
        return base_metrics
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
        '''Lightning test step: same behavior as the validation step.'''
        return self._generative_step(lowerCamelCase__ )
    def lowercase_ ( self , lowerCamelCase__ ) -> str:
        '''Lightning test-epoch hook: reuse the validation aggregation with a "test" prefix.'''
        return self.validation_epoch_end(lowerCamelCase__ , prefix='test' )
    def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
        '''Build the dataset for the given split name (train/val/test).'''
        # NOTE(review): `type_path` and `dataset` below should be the (renamed)
        # parameter / local — mechanically renamed code. TODO confirm.
        __lowerCamelCase = self.n_obs[type_path]
        __lowerCamelCase = self.target_lens[type_path]
        __lowerCamelCase = self.dataset_class(
            self.tokenizer , type_path=lowerCamelCase__ , n_obs=lowerCamelCase__ , max_target_length=lowerCamelCase__ , **self.dataset_kwargs , )
        return dataset
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ) -> List[Any]:
        '''Build a DataLoader for a split. Training may use either a sortish
        sampler (length-bucketed) or a dynamic token-budget batch sampler.'''
        __lowerCamelCase = self.get_dataset(lowerCamelCase__ )
        # sortish sampler: sorts examples by length within noisy buckets (train only)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            __lowerCamelCase = dataset.make_sortish_sampler(lowerCamelCase__ , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
        # dynamic batching: cap total tokens per batch instead of examples per batch
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            __lowerCamelCase = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            # plain fixed-size batches
            return DataLoader(
                lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
    def lowercase_ ( self ) -> Optional[int]:
        '''Return the training DataLoader (shuffle flag passed through, renamed).'''
        __lowerCamelCase = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase__ )
        return dataloader
    def lowercase_ ( self ) -> Any:
        '''Return the validation DataLoader.'''
        return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
    def lowercase_ ( self ) -> str:
        '''Return the test DataLoader.'''
        return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def lowercase_ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
        '''Register the seq2seq fine-tuning CLI arguments on top of the base/generic ones.

        NOTE(review): `parser` below should be the first (renamed) parameter —
        mechanically renamed code. TODO confirm.
        '''
        BaseTransformer.add_model_specific_args(lowerCamelCase__ , lowerCamelCase__ )
        add_generic_args(lowerCamelCase__ , lowerCamelCase__ )
        parser.add_argument(
            '--max_source_length' , default=1_024 , type=lowerCamelCase__ , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        # NOTE(review): the three target-length help strings below are copy-pasted
        # from --max_source_length ("input sequence length").
        parser.add_argument(
            '--max_target_length' , default=56 , type=lowerCamelCase__ , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=lowerCamelCase__ , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=lowerCamelCase__ , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=lowerCamelCase__ )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowerCamelCase__ )
        parser.add_argument('--max_tokens_per_batch' , type=lowerCamelCase__ , default=lowerCamelCase__ )
        parser.add_argument('--logger_name' , type=lowerCamelCase__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=lowerCamelCase__ , default=500 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
        # NOTE(review): the help text below is copy-pasted from the --n_* args
        # and does not describe --task.
        parser.add_argument(
            '--task' , type=lowerCamelCase__ , default='summarization' , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' , type=lowerCamelCase__ , default=0.0 , required=lowerCamelCase__ )
        parser.add_argument('--src_lang' , type=lowerCamelCase__ , default='' , required=lowerCamelCase__ )
        parser.add_argument('--tgt_lang' , type=lowerCamelCase__ , default='' , required=lowerCamelCase__ )
        parser.add_argument('--eval_beams' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ )
        parser.add_argument(
            '--val_metric' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=lowerCamelCase__ , default=1 , required=lowerCamelCase__ , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ) , )
        return parser
class __lowerCAmelCase ( __A ):
    """Fine-tuning module for translation: same training loop as the parent
    summarization module, but evaluated with BLEU instead of ROUGE."""

    # NOTE(review): the four class attributes below all rebind the same name
    # (snake_case_); upstream they are distinct (mode, loss_names, metric_names,
    # default validation metric). Mechanically renamed code — TODO confirm.
    snake_case_ = '''translation'''
    snake_case_ = ['''loss''']
    snake_case_ = ['''bleu''']
    snake_case_ = '''bleu'''
    def __init__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
        '''Initialize the parent module and record source/target language codes.'''
        super().__init__(lowerCamelCase__ , **lowerCamelCase__ )
        # NOTE(review): `hparams` is the (renamed) first parameter; the two
        # assignments rebind one local instead of setting attributes. TODO confirm.
        __lowerCamelCase = hparams.src_lang
        __lowerCamelCase = hparams.tgt_lang
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
        '''Compute BLEU between predictions and references.'''
        return calculate_bleu(lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple=None ) -> int:
    """Entry point: build the fine-tuning module, configure logging and early
    stopping, train, and optionally test on the latest checkpoint.

    NOTE(review): the two parameters share one (renamed) name, which is not
    valid Python, and the body references the original names (`args`, `model`,
    `dataset`, `trainer`, `checkpoints`) while intermediates all rebind
    `__lowerCamelCase` — mechanically renamed code; comments describe the
    apparent upstream intent. TODO confirm.
    """
    Path(args.output_dir ).mkdir(exist_ok=__lowercase )
    check_output_dir(__lowercase , expected_items=3 )
    # choose the module type from the task name
    if model is None:
        if "summarization" in args.task:
            __lowerCamelCase = SummarizationModule(__lowercase )
        else:
            __lowerCamelCase = TranslationModule(__lowercase )
    __lowerCamelCase = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        __lowerCamelCase = True # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        __lowerCamelCase = os.environ.get('WANDB_PROJECT' , __lowercase )
        __lowerCamelCase = WandbLogger(name=model.output_dir.name , project=__lowercase )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        __lowerCamelCase = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        __lowerCamelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        __lowerCamelCase = False
    # lower-is-better only when validating on loss
    __lowerCamelCase = args.val_metric == 'loss'
    __lowerCamelCase = generic_train(
        __lowercase , __lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , __lowercase ) , early_stopping_callback=__lowercase , logger=__lowercase , )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    __lowerCamelCase = ''
    __lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=__lowercase ) )
    if checkpoints:
        # NOTE(review): the same value is assigned twice; upstream sets both the
        # test checkpoint path and trainer.resume_from_checkpoint. TODO confirm.
        __lowerCamelCase = checkpoints[-1]
        __lowerCamelCase = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Build the CLI: generic PL Trainer args plus model-specific args, then train.
    # NOTE(review): every result is bound to the same name `__A`, while the calls
    # below reference `parser`/`args` — mechanically renamed code. TODO confirm.
    __A = argparse.ArgumentParser()
    __A = pl.Trainer.add_argparse_args(parser)
    __A = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    __A = parser.parse_args()
    main(args)
| 704
|
import requests
__A = "" # <-- Put your OpenWeatherMap appid here!
__A = "https://api.openweathermap.org/data/2.5/"
def lowerCamelCase_ (location : str = "Chicago" , appid : str = APPID ) -> dict:
    """Return the current weather for *location* from the OpenWeatherMap API.

    Args:
        location: city name query, e.g. ``"Chicago"`` or ``"Kolkata, India"``.
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # The original passed ``params=locals()``, which only works when the
    # parameters are named exactly like the API's query keys (and the renamed
    # signature had two identical parameter names, which is invalid Python).
    # Pass the query parameters explicitly: the endpoint expects `q` and `appid`.
    return requests.get(URL_BASE + 'weather' , params={"q": location, "appid": appid} ).json()
def lowerCamelCase_ (location : str = "Kolkata, India" , appid : str = APPID ) -> dict:
    """Return the weather forecast for *location* from the OpenWeatherMap API.

    Args:
        location: city name query.
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # Explicit query params instead of the broken ``locals()`` trick (the
    # renamed signature had duplicate parameter names, which is invalid Python).
    return requests.get(URL_BASE + 'forecast' , params={"q": location, "appid": appid} ).json()
def lowerCamelCase_ (lat : float = 55.68 , lon : float = 12.57 , appid : str = APPID ) -> dict:
    """Return the One Call weather data for the given coordinates.

    Args:
        lat: latitude in degrees (default: Copenhagen).
        lon: longitude in degrees.
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # Explicit query params instead of the broken ``locals()`` trick (the
    # renamed signature had duplicate parameter names, which is invalid Python).
    return requests.get(URL_BASE + 'onecall' , params={"lat": lat, "lon": lon, "appid": appid} ).json()
if __name__ == "__main__":
    from pprint import pprint
    # Interactive loop: prompt for a location until the user enters a blank line.
    while True:
        __A = input("Enter a location:").strip()
        # NOTE(review): `location` and `current_weather` are undefined here — the
        # input above is bound to `__A` and the functions were renamed; this is
        # mechanically renamed code. TODO confirm.
        if location:
            pprint(current_weather(location))
        else:
            break
| 167
| 0
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """A basic Transformer block: self-attention, optional cross-attention, and a
    feed-forward layer, each preceded by a (possibly adaptive) LayerNorm.

    NOTE(review): throughout this class, assignments of the form
    ``lowerCAmelCase = ...`` rebind a single local name where the upstream
    diffusers code assigns instance attributes (``self.norm1``, ``self.attn1``,
    ``self.ff``, ...), and the ``__init__`` parameters were all renamed to one
    identifier. Mechanically renamed code — comments describe the apparent
    intent. TODO confirm against diffusers BasicTransformerBlock.
    """
    def __init__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "geglu" , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True , lowerCAmelCase : str = "layer_norm" , lowerCAmelCase : bool = False , ):
        super().__init__()
        lowerCAmelCase = only_cross_attention
        # adaptive norms require class-conditional embeddings (num_embeds_ada_norm)
        lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
        lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            lowerCAmelCase = AdaLayerNorm(lowerCAmelCase , lowerCAmelCase )
        elif self.use_ada_layer_norm_zero:
            lowerCAmelCase = AdaLayerNormZero(lowerCAmelCase , lowerCAmelCase )
        else:
            lowerCAmelCase = nn.LayerNorm(lowerCAmelCase , elementwise_affine=lowerCAmelCase )
        lowerCAmelCase = Attention(
            query_dim=lowerCAmelCase , heads=lowerCAmelCase , dim_head=lowerCAmelCase , dropout=lowerCAmelCase , bias=lowerCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=lowerCAmelCase , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            lowerCAmelCase = (
                AdaLayerNorm(lowerCAmelCase , lowerCAmelCase )
                if self.use_ada_layer_norm
                else nn.LayerNorm(lowerCAmelCase , elementwise_affine=lowerCAmelCase )
            )
            lowerCAmelCase = Attention(
                query_dim=lowerCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=lowerCAmelCase , dim_head=lowerCAmelCase , dropout=lowerCAmelCase , bias=lowerCAmelCase , upcast_attention=lowerCAmelCase , ) # is self-attn if encoder_hidden_states is none
        else:
            lowerCAmelCase = None
            lowerCAmelCase = None
        # 3. Feed-forward
        lowerCAmelCase = nn.LayerNorm(lowerCAmelCase , elementwise_affine=lowerCAmelCase )
        lowerCAmelCase = FeedForward(lowerCAmelCase , dropout=lowerCAmelCase , activation_fn=lowerCAmelCase , final_dropout=lowerCAmelCase )
        # let chunk size default to None
        lowerCAmelCase = None
        lowerCAmelCase = 0
    def __lowercase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : int ):
        # Sets chunk feed-forward
        lowerCAmelCase = chunk_size
        lowerCAmelCase = dim
    def __lowercase ( self : str , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , lowerCAmelCase : Dict[str, Any] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            lowerCAmelCase = self.norma(lowerCAmelCase , lowerCAmelCase )
        elif self.use_ada_layer_norm_zero:
            # ada_norm_zero additionally returns the gate/shift/scale modulation chunks
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.norma(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hidden_dtype=hidden_states.dtype )
        else:
            lowerCAmelCase = self.norma(lowerCAmelCase )
        lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        lowerCAmelCase = self.attna(
            lowerCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowerCAmelCase , **lowerCAmelCase , )
        if self.use_ada_layer_norm_zero:
            lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
        # residual connection
        lowerCAmelCase = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            lowerCAmelCase = (
                self.norma(lowerCAmelCase , lowerCAmelCase ) if self.use_ada_layer_norm else self.norma(lowerCAmelCase )
            )
            lowerCAmelCase = self.attna(
                lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase , )
            lowerCAmelCase = attn_output + hidden_states
        # 3. Feed-forward
        lowerCAmelCase = self.norma(lowerCAmelCase )
        if self.use_ada_layer_norm_zero:
            lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            lowerCAmelCase = torch.cat(
                [self.ff(lowerCAmelCase ) for hid_slice in norm_hidden_states.chunk(lowerCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            lowerCAmelCase = self.ff(lowerCAmelCase )
        if self.use_ada_layer_norm_zero:
            lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
        lowerCAmelCase = ff_output + hidden_states
        return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Transformer feed-forward network: a project-in activation layer
    (GELU / GEGLU / approximate variants), dropout, a project-out linear, and an
    optional final dropout.

    NOTE(review): ``lowerCAmelCase = ...`` lines rebind one local where upstream
    assigns attributes / locals (``inner_dim``, ``act_fn``, ``self.net``);
    mechanically renamed code. TODO confirm against diffusers FeedForward.
    """
    def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 4 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : str = "geglu" , lowerCAmelCase : bool = False , ):
        super().__init__()
        # hidden width = dim * mult; output defaults back to the input width
        lowerCAmelCase = int(dim * mult )
        lowerCAmelCase = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            lowerCAmelCase = GELU(lowerCAmelCase , lowerCAmelCase )
        # NOTE(review): upstream uses `elif` here; the plain `if` is harmless only
        # because the string conditions are mutually exclusive.
        if activation_fn == "gelu-approximate":
            lowerCAmelCase = GELU(lowerCAmelCase , lowerCAmelCase , approximate="""tanh""" )
        elif activation_fn == "geglu":
            lowerCAmelCase = GEGLU(lowerCAmelCase , lowerCAmelCase )
        elif activation_fn == "geglu-approximate":
            lowerCAmelCase = ApproximateGELU(lowerCAmelCase , lowerCAmelCase )
        lowerCAmelCase = nn.ModuleList([] )
        # project in
        self.net.append(lowerCAmelCase )
        # project dropout
        self.net.append(nn.Dropout(lowerCAmelCase ) )
        # project out
        self.net.append(nn.Linear(lowerCAmelCase , lowerCAmelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(lowerCAmelCase ) )
    def __lowercase ( self : Optional[Any] , lowerCAmelCase : List[str] ):
        # Apply the layers in self.net sequentially.
        for module in self.net:
            lowerCAmelCase = module(lowerCAmelCase )
        return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Linear projection followed by GELU, with an optional tanh approximation.

    NOTE(review): the two ``lowerCAmelCase = ...`` lines in ``__init__`` should
    set ``self.proj`` / ``self.approximate`` upstream — mechanically renamed
    code. TODO confirm.
    """
    def __init__( self : Any , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : str = "none" ):
        super().__init__()
        lowerCAmelCase = nn.Linear(lowerCAmelCase , lowerCAmelCase )
        lowerCAmelCase = approximate
    def __lowercase ( self : Tuple , lowerCAmelCase : Dict ):
        if gate.device.type != "mps":
            return F.gelu(lowerCAmelCase , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def __lowercase ( self : List[str] , lowerCAmelCase : Dict ):
        # project, then apply the (possibly approximated) GELU
        lowerCAmelCase = self.proj(lowerCAmelCase )
        lowerCAmelCase = self.gelu(lowerCAmelCase )
        return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """GEGLU activation: a linear projection to twice the output width whose two
    halves are combined as ``hidden * gelu(gate)``.

    NOTE(review): ``lowerCAmelCase = nn.Linear(...)`` should set ``self.proj``
    upstream — mechanically renamed code. TODO confirm.
    """
    def __init__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int ):
        super().__init__()
        lowerCAmelCase = nn.Linear(lowerCAmelCase , dim_out * 2 )
    def __lowercase ( self : List[Any] , lowerCAmelCase : Tuple ):
        if gate.device.type != "mps":
            return F.gelu(lowerCAmelCase )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
    def __lowercase ( self : Any , lowerCAmelCase : Optional[int] ):
        # split the doubled projection into value and gate halves
        lowerCAmelCase , lowerCAmelCase = self.proj(lowerCAmelCase ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Sigmoid-based GELU approximation: ``x * sigmoid(1.702 * x)``.

    NOTE(review): ``lowerCAmelCase = nn.Linear(...)`` should set ``self.proj``
    upstream — mechanically renamed code. TODO confirm.
    """
    def __init__( self : Any , lowerCAmelCase : int , lowerCAmelCase : int ):
        super().__init__()
        lowerCAmelCase = nn.Linear(lowerCAmelCase , lowerCAmelCase )
    def __lowercase ( self : int , lowerCAmelCase : str ):
        lowerCAmelCase = self.proj(lowerCAmelCase )
        # 1.702 is the standard sigmoid-GELU approximation constant
        return x * torch.sigmoid(1.702 * x )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Adaptive LayerNorm: scale and shift of the normalized input are predicted
    from an (e.g. timestep) embedding.

    NOTE(review): the ``lowerCAmelCase = ...`` lines should set ``self.emb`` /
    ``self.silu`` / ``self.linear`` / ``self.norm`` upstream — mechanically
    renamed code. TODO confirm.
    """
    def __init__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] ):
        super().__init__()
        lowerCAmelCase = nn.Embedding(lowerCAmelCase , lowerCAmelCase )
        lowerCAmelCase = nn.SiLU()
        # predicts both scale and shift, hence embedding_dim * 2
        lowerCAmelCase = nn.Linear(lowerCAmelCase , embedding_dim * 2 )
        lowerCAmelCase = nn.LayerNorm(lowerCAmelCase , elementwise_affine=lowerCAmelCase )
    def __lowercase ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : int ):
        lowerCAmelCase = self.linear(self.silu(self.emb(lowerCAmelCase ) ) )
        lowerCAmelCase , lowerCAmelCase = torch.chunk(lowerCAmelCase , 2 )
        # modulate the normalized activations with the predicted scale/shift
        lowerCAmelCase = self.norm(lowerCAmelCase ) * (1 + scale) + shift
        return x
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """AdaLayerNorm-Zero (DiT-style): predicts six modulation chunks
    (shift/scale/gate for attention and for the MLP) from combined
    timestep+label embeddings.

    NOTE(review): the ``lowerCAmelCase = ...`` lines should set attributes
    upstream — mechanically renamed code. TODO confirm.
    """
    def __init__( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] ):
        super().__init__()
        lowerCAmelCase = CombinedTimestepLabelEmbeddings(lowerCAmelCase , lowerCAmelCase )
        lowerCAmelCase = nn.SiLU()
        # six modulation chunks, hence 6 * embedding_dim
        lowerCAmelCase = nn.Linear(lowerCAmelCase , 6 * embedding_dim , bias=lowerCAmelCase )
        lowerCAmelCase = nn.LayerNorm(lowerCAmelCase , elementwise_affine=lowerCAmelCase , eps=1e-6 )
    def __lowercase ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=None ):
        lowerCAmelCase = self.linear(self.silu(self.emb(lowerCAmelCase , lowerCAmelCase , hidden_dtype=lowerCAmelCase ) ) )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = emb.chunk(6 , dim=1 )
        lowerCAmelCase = self.norm(lowerCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Adaptive GroupNorm: group-normalizes the input and modulates it with a
    scale/shift predicted from an embedding.

    NOTE(review): ``lowerCAmelCase = ...`` lines should set ``self.num_groups``/
    ``self.eps``/``self.act``/``self.linear`` upstream — mechanically renamed
    code. TODO confirm.
    """
    def __init__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : float = 1e-5 ):
        super().__init__()
        lowerCAmelCase = num_groups
        lowerCAmelCase = eps
        if act_fn is None:
            lowerCAmelCase = None
        else:
            lowerCAmelCase = get_activation(lowerCAmelCase )
        # predicts both scale and shift, hence out_dim * 2
        lowerCAmelCase = nn.Linear(lowerCAmelCase , out_dim * 2 )
    def __lowercase ( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ):
        if self.act:
            lowerCAmelCase = self.act(lowerCAmelCase )
        lowerCAmelCase = self.linear(lowerCAmelCase )
        # broadcast the embedding over the spatial dimensions
        lowerCAmelCase = emb[:, :, None, None]
        lowerCAmelCase , lowerCAmelCase = emb.chunk(2 , dim=1 )
        lowerCAmelCase = F.group_norm(lowerCAmelCase , self.num_groups , eps=self.eps )
        lowerCAmelCase = x * (1 + scale) + shift
        return x
| 169
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _a ):
    """Unit tests for DDIMParallelScheduler (batched/parallel DDIM stepping).

    NOTE(review): throughout this class intermediates rebind a single local name
    (``lowerCAmelCase``) while later lines reference the original names
    (``config``, ``scheduler``, ``sample``, ``result_sum``, ...) — mechanically
    renamed code; comments describe the apparent intent. TODO confirm against
    diffusers' test_scheduler_ddim_parallel.
    """
    # scheduler classes under test and the forward-kwargs defaults
    _a = (DDIMParallelScheduler,)
    _a = (('eta', 0.0), ('num_inference_steps', 50))
    def __lowercase ( self : Optional[int] , **lowerCAmelCase : List[str] ):
        '''Return a default scheduler config, updated with any keyword overrides.'''
        lowerCAmelCase = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        # NOTE(review): `config` is never defined — the dict above was bound to
        # the renamed local. TODO confirm.
        config.update(**lowerCAmelCase )
        return config
    def __lowercase ( self : Any , **lowerCAmelCase : Tuple ):
        '''Run a full 10-step denoising loop and return the final sample.'''
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
        lowerCAmelCase = scheduler_class(**lowerCAmelCase )
        lowerCAmelCase , lowerCAmelCase = 10, 0.0
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase )
        for t in scheduler.timesteps:
            lowerCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
            lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
        return sample
    def __lowercase ( self : Dict ):
        '''Config sweep over num_train_timesteps.'''
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase )
    def __lowercase ( self : Dict ):
        '''Config sweep over steps_offset, plus a direct check of the timestep grid.'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCAmelCase )
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
        lowerCAmelCase = scheduler_class(**lowerCAmelCase )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def __lowercase ( self : int ):
        '''Config sweep over (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
    def __lowercase ( self : Optional[int] ):
        '''Config sweep over beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase )
    def __lowercase ( self : Optional[Any] ):
        '''Config sweep over prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase )
    def __lowercase ( self : Union[str, Any] ):
        '''Config sweep over clip_sample.'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowerCAmelCase )
    def __lowercase ( self : List[str] ):
        '''Config sweep over timestep spacing strategies.'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=lowerCAmelCase )
    def __lowercase ( self : Tuple ):
        '''Config sweep over zero-SNR beta rescaling.'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
    def __lowercase ( self : Union[str, Any] ):
        '''Config sweep over dynamic thresholding.'''
        self.check_over_configs(thresholding=lowerCAmelCase )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
    def __lowercase ( self : List[str] ):
        '''Forward check at individual timesteps.'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=lowerCAmelCase )
    def __lowercase ( self : List[str] ):
        '''Forward check over (timestep, num_inference_steps) pairs.'''
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase )
    def __lowercase ( self : Any ):
        '''Forward check over (timestep, eta) pairs.'''
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase )
    def __lowercase ( self : Optional[int] ):
        '''Check the internal variance values against known references.'''
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**lowerCAmelCase )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
    def __lowercase ( self : Any ):
        '''Batched stepping over three perturbed samples; compare summary stats.'''
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config()
        lowerCAmelCase = scheduler_class(**lowerCAmelCase )
        lowerCAmelCase , lowerCAmelCase = 10, 0.0
        scheduler.set_timesteps(lowerCAmelCase )
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        lowerCAmelCase = self.dummy_sample_deter + 0.1
        lowerCAmelCase = self.dummy_sample_deter - 0.1
        lowerCAmelCase = samplea.shape[0]
        lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
        lowerCAmelCase = torch.arange(lowerCAmelCase )[0:3, None].repeat(1 , lowerCAmelCase )
        lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        lowerCAmelCase = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase )
        lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def __lowercase ( self : Optional[int] ):
        '''Full loop with default (epsilon) prediction; compare summary stats.'''
        lowerCAmelCase = self.full_loop()
        lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
    def __lowercase ( self : Tuple ):
        '''Full loop with v-prediction; compare summary stats.'''
        lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
        lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def __lowercase ( self : Optional[int] ):
        # We specify different beta, so that the first alpha is 0.99
        lowerCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
        lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def __lowercase ( self : Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        lowerCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
        lowerCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
        lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 169
| 1
|
from typing import List
import numpy as np
def A (__A : dict ) -> int:
    """Return the number of shards implied by the list-valued entries of a
    gen_kwargs dict.

    Every ``list`` value is treated as a sharded data source; all such lists
    must have the same length, which is the shard count (at least 1).

    Args:
        __A: the generator keyword arguments to inspect.

    Returns:
        The common length of the list values, or 1 if there are none.

    Raises:
        RuntimeError: if two list values have different lengths (ambiguous sharding).
    """
    # Map each list-valued key to its length; non-list values are ignored.
    # (The original body referenced the pre-rename names `gen_kwargs`/`value`
    # and tested `isinstance(x, x)`, which raises TypeError — restored here.)
    lists_lengths = {key: len(value) for key, value in __A.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def A (num_shards : int , max_num_jobs : int ) -> List[range]:
    """Split ``num_shards`` contiguous shard indices into at most
    ``max_num_jobs`` groups of near-equal size.

    Args:
        num_shards: total number of shards to distribute.
        max_num_jobs: maximum number of groups to create.

    Returns:
        A list of ``range`` objects covering ``0..num_shards`` contiguously;
        the first ``num_shards % max_num_jobs`` groups get one extra shard.
    """
    # (The original signature declared the same renamed parameter twice, which
    # is invalid Python, and `range(__A, ...)` lost the `start` bound — restored.)
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Base share plus one extra shard for the first `remainder` groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        # Continue where the previous group's range stopped.
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start, start + num_shards_to_add))
    return shards_indices_per_group
def A (__A : dict , __A : int ) -> List[dict]:
    """Split a gen_kwargs dict into up to max_num_jobs dicts, sharding its
    list values across the groups.

    NOTE(review): the two parameters share one (renamed) name, which is not
    valid Python; the helpers `_number_of_shards_in_gen_kwargs` /
    `_distribute_shards` are not defined under those names in this file (they
    were also renamed to `A`); and `num_shards`, `shard_indices_per_group`,
    `gen_kwargs` below reference the lost pre-rename locals. Mechanically
    renamed code — TODO restore the original names before use.
    """
    UpperCAmelCase_ = _number_of_shards_in_gen_kwargs(__A )
    if num_shards == 1:
        # nothing to split: return a single copy of the kwargs
        return [dict(__A )]
    else:
        UpperCAmelCase_ = _distribute_shards(num_shards=__A , max_num_jobs=__A )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(__A , __A )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(__A ) )
        ]
def A (__A : List[dict] ) -> dict:
    """Merge a list of gen_kwargs dicts (one per shard group) back into one.

    List-valued entries are concatenated across all dicts in order; for any
    other value, the first dict's entry is kept.

    Args:
        __A: non-empty list of gen_kwargs dicts sharing the same keys.

    Returns:
        A single merged gen_kwargs dict.
    """
    # (The original body referenced the pre-rename name `gen_kwargs_list` and
    # tested `isinstance(x, <list instance>)`, which raises TypeError — restored.)
    return {
        key: [value for gen_kwargs in __A for value in gen_kwargs[key]]
        if isinstance(__A[0][key], list)
        else __A[0][key]
        for key in __A[0]
    }
def A (rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    """Return a copy of *gen_kwargs* whose list values have been shuffled.

    Lists of the same length are shuffled with the SAME permutation, so
    parallel data-source lists stay aligned after shuffling.

    Args:
        rng: numpy random generator used for the shuffles.
        gen_kwargs: the generator keyword arguments; not modified in place.

    Returns:
        A shallow copy of ``gen_kwargs`` with shuffled list values.
    """
    # (The original signature declared the same renamed parameter twice, which
    # is invalid Python, and the body referenced the lost names `rng`/
    # `gen_kwargs`/`value` and the broken `isinstance(x, x)` test — restored.)
    # One shared permutation per list size keeps same-length lists aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 169
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder that denoises continuous (spectrogram) frames.

    NOTE(review): the obfuscated original was ``class __snake_case(a, a)`` with
    undefined bases ``a``; ``ModelMixin, ConfigMixin`` are the imported mixins
    this model plainly uses (``@register_to_config``, ``self.config``, ``self.dtype``).
    All lost ``self.`` attribute assignments were restored from the names the
    forward pass reads.
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        # Project the sinusoidal time embedding up to the 4*d_model FiLM conditioning size.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        # Position table over target positions; frozen (the original set requires_grad = False).
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer-product two padding masks into a broadcastable cross-attention mask."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """Denoise ``decoder_input_tokens`` conditioned on encodings and a noise time in [0, 1)."""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaFilmDecoder
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder block: cond self-attn, cross-attn, FiLM feed-forward.

    NOTE(review): the obfuscated original was named ``__snake_case``; the name
    ``DecoderLayer`` is what the decoder model above instantiates.
    """

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        """Run the three sub-layers; returns a 1-tuple for T5-style call compatibility."""
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )
        if encoder_hidden_states is not None:
            # Turn the 0/1 encoder mask into additive attention logits (-1e10 on padding).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = DecoderLayer
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning and residual dropout.

    NOTE(review): restored from an obfuscated original whose ``self.`` attribute
    assignments were lost; names follow the attributes the forward pass reads.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        # FiLM conditioning comes in at 4*d_model and modulates d_model channels.
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaLayerSelfAttentionCond
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder states with residual dropout.

    NOTE(review): restored from an obfuscated original whose ``self.`` attribute
    assignments were lost; names follow the attributes the forward pass reads.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            # Drop the broadcast head dimension the decoder's mask carries.
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaLayerCrossAttention
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward sub-layer with optional FiLM conditioning.

    NOTE(review): restored from an obfuscated original whose ``self.`` attribute
    assignments were lost; names follow the attributes the forward pass reads.
    """

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaLayerFFCond
class TaDenseGatedActDense(nn.Module):
    """T5 gated-GELU feed-forward: ``wo(dropout(gelu(wi_0(x)) * wi_1(x)))``.

    NOTE(review): the obfuscated original collapsed the two projections into a
    single ``wi_a`` applied twice in ``forward``; restored the separate
    gate (``wi_0``) and value (``wi_1``) projections.
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaDenseGatedActDense
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale only, no shift, no mean subtraction.

    NOTE(review): restored from an obfuscated original that lost the
    ``self.weight`` / ``self.variance_epsilon`` assignments its forward reads.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaLayerNorm
class NewGELUActivation(nn.Module):
    """Tanh-approximation GELU activation.

    NOTE(review): the obfuscated original mixed a mangled parameter name with
    the ``input`` builtin inside the expression; restored to one consistent
    parameter.
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = NewGELUActivation
class TaFiLMLayer(nn.Module):
    """FiLM layer: predict a per-channel scale and shift from the conditioning embedding.

    NOTE(review): restored from an obfuscated original that lost the
    ``self.scale_bias`` assignment its forward reads.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # One projection produces both scale and shift (hence out_features * 2).
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x


# Preserve the old (obfuscated) public name as a backward-compatible alias.
__snake_case = TaFiLMLayer
# --- concatenation artifact removed (stray "| 169 / | 1" separator was a syntax error) ---
from PIL import Image
def _A ( __magic_name__ ):
lowercase__ , lowercase__ = image.size
lowercase__ = 0
lowercase__ = image.load()
for i in range(__magic_name__ ):
for j in range(__magic_name__ ):
lowercase__ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__magic_name__ ):
for i in range(__magic_name__ ):
lowercase__ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
    # Threshold the grayscale image and save the result.
    # (The obfuscated original bound the result to a mangled name and then
    # saved an undefined `image`; bind the result before saving.)
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
# --- concatenation artifact removed (stray "| 655" separator was a syntax error) ---
# Standard base64 alphabet (RFC 4648). The encode/decode helpers below read
# this name; the obfuscated original bound the string to `_snake_case` instead.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def _A(data: bytes) -> bytes:
    """Encode a bytes object to base64 without using the stdlib codec.

    Raises TypeError when `data` is not bytes. (The obfuscated original tested
    ``isinstance(x, x)`` and iterated ``bin(<param>)`` instead of ``bin(byte)``.)
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    # Concatenate every byte as an 8-bit binary string.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def _A(encoded_data):
    """Decode a base64 string (or ASCII bytes) back to the original bytes.

    Raises TypeError for other input types and ValueError for non-ASCII bytes.
    Invalid characters and bad padding trip assertions, matching the original
    behavior (callers catching AssertionError still work). (The obfuscated
    original tested ``isinstance(x, x)`` and indexed ``B64_CHARSET.index(<param>)``
    instead of each character.)
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each '=' padding char hid two filler bits; drop them from the tail.
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
# --- concatenation artifact removed (stray "| 655 / | 1" separator was a syntax error) ---
# Fifth power of every decimal digit, keyed by the digit character. The helper
# below reads this name; the obfuscated original bound the dict to `lowercase`
# (with a wrong Optional[int] annotation).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``.

    Computed directly from the digits so the function is self-contained (the
    obfuscated original read a lookup table whose name was never defined).
    """
    return sum(int(digit) ** 5 for digit in str(number))


# Preserve the old (obfuscated) public name as a backward-compatible alias.
UpperCAmelCase_ = digits_fifth_powers_sum
def solution() -> int:
    """Project Euler 30: sum of all numbers equal to the sum of the fifth powers of their digits.

    The search range [1000, 1000000) matches the original; the digit-power sum
    is inlined with a precomputed table because the obfuscated original called
    an undefined helper with an undefined argument.
    """
    digit_pow = {str(d): d**5 for d in range(10)}  # hoisted lookup table
    return sum(
        number
        for number in range(1000, 1000000)
        if number == sum(digit_pow[digit] for digit in str(number))
    )


# Preserve the old (obfuscated) public name as a backward-compatible alias.
UpperCAmelCase_ = solution
if __name__ == "__main__":
    # Print the Project Euler 30 answer.
    # NOTE(review): `solution` is not defined under that name in this obfuscated
    # file (the function above was renamed) — confirm the binding exists.
    print(solution())
# --- concatenation artifact removed (stray "| 584" separator was a syntax error) ---
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the input dict for the TF-OPT tests; derive the attention mask from padding if absent.

    NOTE(review): the obfuscated original declared four parameters all named
    ``_UpperCAmelCase`` (a SyntaxError) and used the nonexistent dtype
    ``tf.inta``; names and ``tf.int8`` restored per the upstream transformers
    test file — confirm.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


# Preserve the old (obfuscated) public name as a backward-compatible alias.
UpperCAmelCase_ = prepare_opt_inputs_dict
@require_tf
class TFOPTModelTester:
    """Builds small OPT configs and inputs for the TF model tests.

    NOTE(review): the obfuscated original lost every ``self.`` attribute
    assignment and duplicated all ``__init__`` parameter names (a SyntaxError);
    restored against the upstream ``test_modeling_tf_opt.py`` layout — verify
    the default values. The class is referenced below as ``TFOPTModelTester``.
    """

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        """Build a tiny OPT config plus a matching input batch that ends in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached decoding (past_key_values) matches full re-decoding."""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


# Preserve the old (obfuscated) public name as a backward-compatible alias.
a__ = TFOPTModelTester
@require_tf
class a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): obfuscated copy of a TF-OPT model test class. The bases
    # `__SCREAMING_SNAKE_CASE` are undefined here (presumably TFModelTesterMixin /
    # PipelineTesterMixin), the repeated `_A` class attributes and every
    # `lowerCamelCase_` assignment lost their real names, and all five methods
    # share the name `lowerCAmelCase` (later defs shadow earlier ones) — confirm
    # against the upstream transformers test file before relying on this class.
    _A = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    _A = (TFOPTForCausalLM,) if is_tf_available() else ()
    _A = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    _A = False
    _A = False
    _A = False
    _A = 10
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Instantiate the model tester and the config tester (bindings lost by obfuscation)."""
        lowerCamelCase_: List[str] = TFOPTModelTester(self )
        lowerCamelCase_: Optional[Any] = ConfigTester(self , config_class=A_ )
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCAmelCase ( self : Optional[Any] ) -> str:
        """Check cached decoding against full re-decoding on large inputs."""
        lowerCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A_ )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Resize token embeddings and verify new shapes and that overlapping weights are preserved."""
        lowerCamelCase_ , lowerCamelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(A_ : Optional[Any] , A_ : Union[str, Any] ):
            if hasattr(A_ , """weight""" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(A_ , """weight""" ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                lowerCamelCase_: List[Any] = model_class(config=A_ )
                lowerCamelCase_: List[Any] = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
                lowerCamelCase_: List[Any] = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(A_ )
                lowerCamelCase_: int = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
                lowerCamelCase_: List[Any] = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                lowerCamelCase_: List[Any] = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , A_ )
                # check that weights remain the same after resizing
                lowerCamelCase_: int = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        lowerCamelCase_: Tuple = False
                self.assertTrue(A_ )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , A_ )
                    lowerCamelCase_: Union[str, Any] = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            lowerCamelCase_: Any = False
                    self.assertTrue(A_ )
def _long_tensor(tok_lst):
    """Wrap a Python list of token ids in an integer tf constant tensor.

    NOTE(review): the obfuscated dtype ``tf.intaa`` is ambiguous (int32/int64);
    ``tf.int32`` matches the upstream transformers helper — confirm.
    """
    return tf.constant(tok_lst, dtype=tf.int32)


# Preserve the old (obfuscated) public name as a backward-compatible alias.
UpperCAmelCase_ = _long_tensor
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """Shared config/data builder for the OPT head tests.

    NOTE(review): restored from an obfuscated original whose class attribute
    lost its name (``_A = 99``) while the body reads ``self.vocab_size``.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """Return a tiny OPT config, a (4, 7) input batch ending in EOS, and its batch size."""
        # NOTE(review): the obfuscated dtype `tf.intaa` is ambiguous (int32/int64);
        # int32 matches the upstream transformers test — confirm.
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


# Preserve the old (obfuscated) public name as a backward-compatible alias.
a__ = TFOPTHeadTests
@require_sentencepiece
@require_tf
class a__ ( unittest.TestCase ):
    # NOTE(review): obfuscated TF-OPT integration test; every `lowerCamelCase_`
    # assignment lost its real name and `A_` is undefined — confirm against the
    # upstream transformers test file.
    @slow
    def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
        """Compare TFOPTModel hidden states (eager and XLA-compiled) against reference slices."""
        lowerCamelCase_: Dict = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
        lowerCamelCase_: Dict = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        lowerCamelCase_: Union[str, Any] = tf.not_equal(A_ , model.config.pad_token_id )
        with tf.GradientTape():
            lowerCamelCase_: Optional[int] = model(input_ids=A_ , attention_mask=A_ ).last_hidden_state
        lowerCamelCase_: Dict = (1, 11, 5_12)
        self.assertEqual(output.shape , A_ )
        lowerCamelCase_: int = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-3 ) )
        lowerCamelCase_: Any = tf.function(A_ , jit_compile=A_ )
        lowerCamelCase_: int = xla_generate(A_ , A_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-2 ) )
@require_tf
@slow
class a__ ( unittest.TestCase ):
    # NOTE(review): obfuscated TF-OPT logits test; `lowerCamelCase_` bindings
    # and `A_` arguments lost their real names — confirm against upstream.
    def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Store the checkpoint path used by the tests (binding lost by obfuscation)."""
        super().setUp()
        lowerCamelCase_: List[str] = """facebook/opt-350m"""
    def lowerCAmelCase ( self : Dict ) -> Any:
        """Compare mean logits (eager and XLA-compiled) against Metaseq reference values."""
        lowerCamelCase_: Optional[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
        lowerCamelCase_: Tuple = GPTaTokenizer.from_pretrained(self.path_model )
        lowerCamelCase_: Optional[int] = [
            """Today is a beautiful day and I want to""",
            """In the city of""",
            """Paris is the capital of France and""",
            """Computers and mobile phones have taken""",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        lowerCamelCase_: int = tokenizer(A_ , return_tensors="""tf""" , padding=A_ , add_special_tokens=A_ )
        lowerCamelCase_: List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        lowerCamelCase_: int = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) )
        lowerCamelCase_: Any = tf.function(A_ , jit_compile=A_ )
        lowerCamelCase_: Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) )
@require_tf
@slow
class a__ ( unittest.TestCase ):
    # NOTE(review): obfuscated TF-OPT generation test; the four methods share
    # the name `lowerCAmelCase` (later defs shadow earlier ones) and every
    # `lowerCamelCase_` binding lost its real name — confirm against upstream.
    @property
    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """Default generation prompts shared by the tests below."""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def lowerCAmelCase ( self : str ) -> Optional[int]:
        """Greedy-generate with opt-125m and compare to the expected continuations."""
        lowerCamelCase_: Dict = """facebook/opt-125m"""
        lowerCamelCase_: Optional[int] = [
            """Today is a beautiful day and I want to""",
            """In the city of New York, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        lowerCamelCase_: Union[str, Any] = []
        lowerCamelCase_: str = GPTaTokenizer.from_pretrained(A_ )
        lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
        for prompt in self.prompts:
            lowerCamelCase_: int = tokenizer(A_ , return_tensors="""tf""" ).input_ids
            lowerCamelCase_: Optional[Any] = model.generate(A_ , max_length=10 )
            lowerCamelCase_: List[Any] = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
            predicted_outputs += generated_string
        self.assertListEqual(A_ , A_ )
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Check that batched generation with left padding matches unbatched generation."""
        lowerCamelCase_: Optional[Any] = """facebook/opt-350m"""
        lowerCamelCase_: Optional[int] = GPTaTokenizer.from_pretrained(A_ )
        lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
        lowerCamelCase_: Optional[int] = """left"""
        # use different length sentences to test batching
        lowerCamelCase_: str = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        lowerCamelCase_: Any = tokenizer(A_ , return_tensors="""tf""" , padding=A_ )
        lowerCamelCase_: int = inputs["""input_ids"""]
        lowerCamelCase_: List[str] = model.generate(input_ids=A_ , attention_mask=inputs["""attention_mask"""] )
        lowerCamelCase_: Tuple = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        lowerCamelCase_: Optional[int] = model.generate(input_ids=A_ )
        lowerCamelCase_: Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
        lowerCamelCase_: Union[str, Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        lowerCamelCase_: Dict = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings )
        lowerCamelCase_: int = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
        lowerCamelCase_: Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
        lowerCamelCase_: Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
        lowerCamelCase_: Any = [
            """Hello, my dog is a little bit of a dork.\nI'm a little bit""",
            """Today, I was in the middle of a conversation with a friend about the""",
        ]
        self.assertListEqual(A_ , A_ )
        self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
    def lowerCAmelCase ( self : int ) -> Optional[int]:
        """Greedy-generate with opt-350m and compare to the expected continuations."""
        lowerCamelCase_: Dict = """facebook/opt-350m"""
        lowerCamelCase_: Any = [
            """Today is a beautiful day and I want to""",
            """In the city of San Francisco, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        lowerCamelCase_: Union[str, Any] = []
        lowerCamelCase_: Dict = GPTaTokenizer.from_pretrained(A_ )
        lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
        for prompt in self.prompts:
            lowerCamelCase_: List[str] = tokenizer(A_ , return_tensors="""tf""" ).input_ids
            lowerCamelCase_: Dict = model.generate(A_ , max_length=10 )
            lowerCamelCase_: Optional[Any] = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
            predicted_outputs += generated_string
        self.assertListEqual(A_ , A_ )
# --- concatenation artifact removed (stray "| 584 / | 1" separator was a syntax error) ---
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds small ALBERT configs and dummy inputs for the Flax model tests.

    NOTE(review): restored from an obfuscated original that lost every
    ``self.`` attribute assignment; the class is referenced below as
    ``FlaxAlbertModelTester``, hence the restored name. The old name is kept
    as an alias.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Create an AlbertConfig plus random input_ids / masks / token_type_ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


# Preserve the old (obfuscated) public name as a backward-compatible alias.
UpperCamelCase_ = FlaxAlbertModelTester
@require_flax
class UpperCamelCase_ ( a_ , unittest.TestCase ):
    # NOTE(review): obfuscated Flax ALBERT model test; the base `a_` is
    # undefined here (presumably FlaxModelTesterMixin), `FlaxAlbertModelTester`
    # is referenced but not defined under that name, and the
    # `FlaxAlbertForQuestionAnswering` entry appears twice in the tuple —
    # confirm against the upstream transformers test file.
    _A : List[Any] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Instantiate the Flax ALBERT model tester (binding lost by obfuscation)."""
        UpperCAmelCase = FlaxAlbertModelTester(self )
    @slow
    def UpperCamelCase_ ( self ) -> Any:
        """Smoke-test from_pretrained + a forward pass for every model class."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained("""albert-base-v2""" )
            UpperCAmelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(snake_case__ )
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
    # NOTE(review): obfuscated Flax ALBERT integration test; every
    # `UpperCAmelCase` binding and `snake_case__` argument lost its real name —
    # confirm against the upstream transformers test file.
    @slow
    def UpperCamelCase_ ( self ) -> Tuple:
        """Run albert-base-v2 on a fixed input and compare an output slice to reference values."""
        UpperCAmelCase = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
        UpperCAmelCase = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        UpperCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ )[0]
        UpperCAmelCase = (1, 11, 7_68)
        self.assertEqual(output.shape , snake_case__ )
        UpperCAmelCase = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 673
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ :
    """Builds tiny ViT configs and inputs and checks TF ViT model outputs.

    The original version was unusable: every ``def`` repeated the parameter
    name ``snake_case__`` (a SyntaxError), every method was named
    ``UpperCamelCase_`` (so later definitions shadowed earlier ones), and
    constructor arguments were bound to a throwaway local instead of instance
    attributes.  Parameter, attribute and method names below are restored from
    the call sites visible in this file (``self.get_config()``,
    ``self.prepare_config_and_inputs()``, ``self.model_tester.create_and_check_model``...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self) -> List[Any]:
        """Return ``(config, pixel_values, labels)`` for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self) -> Optional[int]:
        """Build a small ViTConfig mirroring the tester's hyper-parameters."""
        # NOTE(review): the original passed an undefined name as ``is_decoder``;
        # False reconstructed -- confirm.
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels) -> Dict:
        """Check hidden-state shapes, including position-embedding interpolation."""
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels) -> List[Any]:
        """Check classification logits, including resized and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self) -> int:
        """Return ``(config, inputs_dict)`` in the format the common tests expect."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( a_ , a_ , unittest.TestCase ):
    """Runs the common TF model tests (and pipeline tests) over ViT.

    NOTE(review): the original declared every class attribute as ``_A`` and
    every method as ``UpperCamelCase_``, so later definitions silently replaced
    earlier ones and unittest discovered no tests.  ``all_model_classes`` is
    read at a call site in this file; the remaining attribute and ``test_*``
    method names are reconstructed (unittest discovery requires the ``test``
    prefix) -- confirm against upstream.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self) -> Dict:
        """Create the model tester and config tester used by the tests below."""
        # NOTE(review): ``TFViTModelTester`` is the intended name of the tester
        # class defined above (currently obfuscated) -- confirm.
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self) -> Tuple:
        """Exercise the shared config serialisation/round-trip checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds(self) -> List[str]:
        pass

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_graph_mode_with_inputs_embeds(self) -> List[str]:
        pass

    def test_model_common_attributes(self) -> Union[str, Any]:
        """Input embeddings must be a Keras layer; output embeddings optional."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )

    def test_forward_signature(self) -> List[str]:
        """The first positional argument of ``call`` must be ``pixel_values``."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self) -> str:
        model = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(model )
def _lowerCAmelCase ( ):
    '''Load the COCO fixture image used by the integration test below.

    The original bound the opened image to a throwaway local and returned the
    undefined name ``image``.
    '''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> Optional[Any]:
        """Image processor matching the checkpoint (None without vision deps).

        Name restored from the ``self.default_image_processor`` read below; the
        original named this property ``UpperCamelCase_`` and it was shadowed.
        """
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self ) -> List[Any]:
        """End-to-end check of ViT image classification on the COCO fixture image."""
        # The original bound every intermediate to ``UpperCAmelCase`` and read
        # undefined names (``snake_case__``, ``prepare_img``); the fixture
        # loader in this file is ``_lowerCAmelCase``.
        model = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
        image_processor = self.default_image_processor
        image = _lowerCAmelCase()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 673
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'vocab_file': 'spiece.model'}
lowerCamelCase = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
lowerCamelCase = '▁'
class snake_case_ ( a__ ):
    """SentencePiece-based T5 tokenizer with ``<extra_id_*>`` sentinel tokens.

    NOTE(review): this copy is not runnable as-is -- ``__init__`` repeats the
    parameter name ``_A`` (a SyntaxError in Python), every other method is
    named ``A__`` (so later definitions shadow earlier ones), and many bodies
    read names that are never bound here (``extra_ids``, ``legacy``,
    ``vocab_file``, ``TaTokenizer``, ...).  The original parameter/method
    names must be restored before this class can work; the comments below
    describe the intended behaviour of each method.
    """

    __UpperCAmelCase =VOCAB_FILES_NAMES
    __UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase =["""input_ids""", """attention_mask"""]
    # Constructor: validates the extra_ids / additional_special_tokens pairing,
    # warns about the legacy tokenisation behaviour, then loads the
    # SentencePiece model from disk.
    def __init__( self , _A , _A="</s>" , _A="<unk>" , _A="<pad>" , _A=1_0_0 , _A=None , _A = None , _A=True , **_A , ):
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [F"""<extra_id_{i}>""" for i in range(lowercase__ )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda _A : bool('extra_id' in str(lowercase__ ) ) , lowercase__ ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        if legacy:
            logger.warning_once(
                F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
        __lowerCAmelCase = legacy
        __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , extra_ids=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase__ , **lowercase__ , )
        __lowerCAmelCase = vocab_file
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowercase__ )
    # Deprecation shim (to be removed in Transformers v5): warn when relying on
    # the historical per-checkpoint max_model_length default.
    @staticmethod
    def A__ ( _A , _A , _A ):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            __lowerCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase__ , )
        return max_model_length
    # Vocabulary size = SentencePiece pieces + sentinel (extra id) tokens.
    @property
    def A__ ( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    # Full token -> id mapping, including tokens added after training.
    def A__ ( self ):
        __lowerCAmelCase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # 0/1 mask marking the positions of special tokens (the trailing EOS).
    def A__ ( self , _A , _A = None , _A = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase__ )) + [1]
        return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
    # All ``<extra_id_N>`` sentinel tokens currently registered.
    def A__ ( self ):
        return list(
            set(filter(lambda _A : bool(re.search(R'<extra_id_\d+>' , lowercase__ ) ) is not None , self.additional_special_tokens ) ) )
    # Ids of the sentinel tokens above.
    def A__ ( self ):
        return [self._convert_token_to_id(lowercase__ ) for token in self.get_sentinel_tokens()]
    # Append EOS unless the sequence already ends with it (warns on duplicates).
    def A__ ( self , _A ):
        if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    # T5 does not use token type ids: return an all-zero mask of the right length.
    def A__ ( self , _A , _A = None ):
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    # Build a model input from one or two sequences, each terminated by EOS.
    def A__ ( self , _A , _A = None ):
        __lowerCAmelCase = self._add_eos_if_not_present(lowercase__ )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(lowercase__ )
            return token_ids_a + token_ids_a
    # Pickling support: the SentencePiece processor itself is not picklable,
    # so it is dropped here and rebuilt in __setstate__.
    def __getstate__( self ):
        __lowerCAmelCase = self.__dict__.copy()
        __lowerCAmelCase = None
        return state
    def __setstate__( self , _A ):
        __lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __lowerCAmelCase = {}
        __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    # Non-legacy mode prefixes the text with the SentencePiece underline so
    # tokens after special tokens are handled consistently.
    def A__ ( self , _A , **_A ):
        if not self.legacy:
            __lowerCAmelCase = SPIECE_UNDERLINE + text.replace(lowercase__ , ' ' )
            return super().tokenize(lowercase__ , **lowercase__ )
    # Core tokenisation: run SentencePiece, then strip the artificial leading
    # underline introduced above when not at the start of the text.
    def A__ ( self , _A , **_A ):
        if not self.legacy:
            __lowerCAmelCase = text.startswith(lowercase__ )
            if is_first:
                __lowerCAmelCase = text[1:]
        __lowerCAmelCase = self.sp_model.encode(lowercase__ , out_type=lowercase__ )
        if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(lowercase__ ):
            __lowerCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    # Token -> id; sentinel tokens map to the top of the vocabulary.
    def A__ ( self , _A ):
        if token.startswith('<extra_id_' ):
            __lowerCAmelCase = re.match(R'<extra_id_(\d+)>' , lowercase__ )
            __lowerCAmelCase = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase__ )
    # Id -> token; ids past the SentencePiece range are sentinel tokens.
    def A__ ( self , _A ):
        if index < self.sp_model.get_piece_size():
            __lowerCAmelCase = self.sp_model.IdToPiece(lowercase__ )
        else:
            __lowerCAmelCase = F"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token
    # Join tokens back into a string, decoding special tokens verbatim.
    def A__ ( self , _A ):
        __lowerCAmelCase = []
        __lowerCAmelCase = ''''''
        __lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase__ ) + token
                __lowerCAmelCase = True
                __lowerCAmelCase = []
            else:
                current_sub_tokens.append(lowercase__ )
                __lowerCAmelCase = False
        out_string += self.sp_model.decode(lowercase__ )
        return out_string.strip()
    # Copy (or re-serialise) the SentencePiece model file into save_directory.
    def A__ ( self , _A , _A = None ):
        if not os.path.isdir(lowercase__ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __lowerCAmelCase = os.path.join(
            lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ , 'wb' ) as fi:
                __lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )
        return (out_vocab_file,)
| 703
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( _a ):
    """Zero-shot text-classification tool backed by a BART-MNLI checkpoint.

    NOTE(review): this copy is not runnable as-is -- all three methods are
    named ``A__`` (later definitions shadow earlier ones), the two-argument
    ``A__(self, _A, _A)`` repeats a parameter name (a SyntaxError), and
    ``setup``/``encode`` bind values to throwaway locals while other code
    reads ``self.entailment_id`` / ``self._labels``.  Original method and
    parameter names must be restored before use.
    """

    __UpperCAmelCase ="""facebook/bart-large-mnli"""
    __UpperCAmelCase =(
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    __UpperCAmelCase ="""text_classifier"""
    __UpperCAmelCase =AutoTokenizer
    __UpperCAmelCase =AutoModelForSequenceClassification
    __UpperCAmelCase =["""text""", ["""text"""]]
    __UpperCAmelCase =["""text"""]
    # Locate the "entailment" label index in the model config after loading.
    def A__ ( self ):
        super().setup()
        __lowerCAmelCase = self.model.config
        __lowerCAmelCase = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail' ):
                # NOTE(review): presumably meant to set ``self.entailment_id``,
                # which the check below reads -- confirm.
                __lowerCAmelCase = int(_A )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    # Tokenise (text, label) pairs as NLI premise/hypothesis batches.
    def A__ ( self , _A , _A ):
        __lowerCAmelCase = labels
        return self.pre_processor(
            [text] * len(_A ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
    # Pick the label whose hypothesis scored highest on the entailment logit.
    def A__ ( self , _A ):
        __lowerCAmelCase = outputs.logits
        # NOTE(review): hard-codes logit column 2 instead of the entailment id
        # computed during setup -- confirm intended.
        __lowerCAmelCase = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 102
| 0
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def __A (_SCREAMING_SNAKE_CASE ) ->int:
    """Return the photographic negative of a numpy image.

    Every pixel value is replaced in place by ``255 - value`` and the (mutated)
    array is also returned.  The original per-pixel loop read an undefined name
    ``img`` (a NameError) and only handled 3-channel images; the vectorised
    form below works for any channel layout.
    """
    # converting each pixel's color to its negative, in one vectorised pass
    _SCREAMING_SNAKE_CASE[:] = 255 - _SCREAMING_SNAKE_CASE
    return _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # The original rebound every name to ``__A`` (clobbering the conversion
    # function itself) and then passed an undefined ``img``.
    # read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = __A(img)
    # show result image
    imshow("""negative of original image""", img)
    waitKey(0)
    destroyAllWindows()
| 93
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _snake_case ( a_ ):
    """Output of the text-to-video pipelines: the generated video frames,
    either as a list of numpy arrays or as a single torch tensor.

    NOTE(review): upstream names this field ``frames``; the obfuscated name
    below will break attribute access in callers -- confirm.
    """

    SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 284
| 0
|
'''simple docstring'''
import operator
def SCREAMING_SNAKE_CASE__ ( arr , reverse = False , solution = None ):
    """Sort *arr* with strand sort and return the sorted list.

    Repeatedly pulls an ordered "strand" out of *arr* and merges it into
    *solution*.  ``reverse=True`` sorts in descending order.  *arr* is consumed
    (left empty) in the process, matching the original behaviour.

    Fixes: the original signature repeated one name for all three parameters
    (a SyntaxError); names are restored from the call site below
    (``reverse=True``).  The strand-extraction loop also popped from the list
    it was iterating (skipping elements) and the recursion called the
    undefined name ``strand_sort``.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    # Pull one strand: elements that keep extending the current ordering.
    sublist = [arr.pop(0 )]
    leftover = []
    for item in arr:
        if _operator(item , sublist[-1] ):
            sublist.append(item )
        else:
            leftover.append(item )
    arr[:] = leftover
    # Merge the ordered strand into the ordered partial solution.
    if not solution:
        solution.extend(sublist )
    else:
        for item in sublist:
            for idx, existing in enumerate(solution ):
                if not _operator(item , existing ):
                    solution.insert(idx , item )
                    break
            else:
                solution.append(item )
    return SCREAMING_SNAKE_CASE__(arr , reverse , solution )
if __name__ == "__main__":
    # The original asserts called an undefined name ``strand_sort``; the
    # function above is defined as ``SCREAMING_SNAKE_CASE__``.
    assert SCREAMING_SNAKE_CASE__([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert SCREAMING_SNAKE_CASE__([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 233
|
'''simple docstring'''
from math import sqrt
def SCREAMING_SNAKE_CASE__ ( limit = 100_0000 ):
    """Project Euler 86: least M such that the number of cuboids with integer
    shortest surface path and sides a <= b <= c <= M first exceeds *limit*.

    Fixes: the original read an undefined name ``limit`` (its only parameter
    had been renamed away) and capped the per-sum count with that same limit
    instead of the current cuboid size, as the counting argument requires.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        # For a cuboid (a, b, M) the shortest surface path has length
        # sqrt((a + b)**2 + M**2); it is an integer exactly when that sum of
        # squares is a perfect square.  a + b ranges over 2 .. 2*M.
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # Number of (a, b) splits of the sum with a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # ``solution`` does not exist in this module; the function above is
    # named ``SCREAMING_SNAKE_CASE__``.
    print(f"""{SCREAMING_SNAKE_CASE__() = }""")
| 233
| 1
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase_ ):
    """Configuration for Mask2Former (backbone choice plus transformer
    decoder / loss hyper-parameters).

    NOTE(review): this copy is not runnable as-is -- ``__init__`` repeats the
    parameter name ``SCREAMING_SNAKE_CASE`` for every argument (a SyntaxError
    in Python) and binds each value to a local ``snake_case`` instead of the
    corresponding instance attribute (``self.backbone_config`` etc.); the two
    methods below also share one name, so the classmethod is shadowed.  The
    original parameter/attribute/method names must be restored before use.
    """

    a__ : Tuple = """mask2former"""
    a__ : List[str] = ["""swin"""]
    a__ : Any = {"""hidden_size""": """hidden_dim"""}
    def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 1_024 , SCREAMING_SNAKE_CASE = "relu" , SCREAMING_SNAKE_CASE = 6 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 2_048 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 255 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 12_544 , SCREAMING_SNAKE_CASE = 3.0 , SCREAMING_SNAKE_CASE = 0.75 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
        """Build the config, defaulting to a small Swin backbone when none is
        supplied, and validating that the backbone type is supported."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            snake_case : Union[str, Any] = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            # A plain dict was passed: recover the concrete config class from
            # its "model_type" entry and instantiate it.
            snake_case : Union[str, Any] = backbone_config.pop("model_type" )
            snake_case : Dict = CONFIG_MAPPING[backbone_model_type]
            snake_case : int = config_class.from_dict(SCREAMING_SNAKE_CASE )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                F'''Supported model types: {','.join(self.backbones_supported )}''' )
        snake_case : Any = backbone_config
        snake_case : List[Any] = feature_size
        snake_case : str = mask_feature_size
        snake_case : Any = hidden_dim
        snake_case : Optional[Any] = encoder_feedforward_dim
        snake_case : Any = activation_function
        snake_case : Optional[Any] = encoder_layers
        snake_case : str = decoder_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : str = dropout
        snake_case : Optional[Any] = dim_feedforward
        snake_case : Optional[int] = pre_norm
        snake_case : Optional[int] = enforce_input_projection
        snake_case : Any = common_stride
        snake_case : Optional[int] = ignore_value
        snake_case : int = num_queries
        snake_case : Optional[int] = no_object_weight
        snake_case : Optional[Any] = class_weight
        snake_case : int = mask_weight
        snake_case : Dict = dice_weight
        snake_case : int = train_num_points
        snake_case : str = oversample_ratio
        snake_case : List[Any] = importance_sample_ratio
        snake_case : Any = init_std
        snake_case : List[str] = init_xavier_std
        snake_case : int = use_auxiliary_loss
        snake_case : str = feature_strides
        snake_case : List[Any] = output_auxiliary_logits
        # In the upstream config the final assignment mirrors decoder_layers
        # into num_hidden_layers for compatibility with common model code.
        snake_case : Any = decoder_layers
        super().__init__(**SCREAMING_SNAKE_CASE )
    @classmethod
    def lowerCamelCase_ ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
        """Alternate constructor: build a config from an existing backbone config."""
        return cls(
            backbone_config=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
    def lowerCamelCase_ ( self ):
        """Serialise to a plain dict, expanding the nested backbone config and
        recording this class's model_type."""
        snake_case : str = copy.deepcopy(self.__dict__ )
        snake_case : str = self.backbone_config.to_dict()
        snake_case : Optional[Any] = self.__class__.model_type
        return output
| 134
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
__A = logging.getLogger(__name__)
def UpperCamelCase__ ( lowercase__ : str ):
    """Snapshot the current git state (id, commit sha, active branch) into
    ``<lowercase__>/git_log.json``.

    Fixes: the original bound both the repo object and the info dict to the
    same throwaway local, read an undefined ``repo``, passed the folder path as
    the ``search_parent_directories`` flag, and called
    ``json.dump(path, path)`` -- which raises, since the second argument must
    be a writable file object.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(lowercase__ ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(lowercase__ , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def UpperCamelCase__ ( lowercase__ : Any ):
    """Initialise single/multi-GPU (and multi-node) distributed training state
    from environment variables, then select the CUDA device and, if needed,
    initialise the NCCL process group.

    NOTE(review): every ``snake_case : ... = ...`` line below binds a local
    instead of setting the corresponding field on ``lowercase__``/``params``
    (e.g. ``params.world_size``), and ``PREFIX`` is likewise never bound, so
    the sanity asserts and the logging summary read values that were never
    set.  The attribute assignments must be restored before this runs.
    """
    if params.n_gpu <= 0:
        # CPU-only run: normalise all distributed-related fields.
        snake_case : Optional[Any] = 0
        snake_case : List[Any] = -1
        snake_case : int = True
        snake_case : str = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs" )
    if params.n_gpu > 1:
        # Multi-GPU job: topology comes from the launcher's environment.
        assert params.local_rank != -1
        snake_case : Tuple = int(os.environ["WORLD_SIZE"] )
        snake_case : Dict = int(os.environ["N_GPU_NODE"] )
        snake_case : Union[str, Any] = int(os.environ["RANK"] )
        # number of nodes / node ID
        snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node
        snake_case : str = params.global_rank // params.n_gpu_per_node
        snake_case : Union[str, Any] = True
        assert params.n_nodes == int(os.environ["N_NODES"] )
        assert params.node_id == int(os.environ["NODE_RANK"] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        snake_case : Union[str, Any] = 1
        snake_case : List[Any] = 0
        snake_case : Union[str, Any] = 0
        snake_case : Union[str, Any] = 0
        snake_case : List[str] = 1
        snake_case : List[str] = 1
        snake_case : Any = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0
    snake_case : Union[str, Any] = params.n_nodes > 1
    # summary
    snake_case : Optional[Any] = F'''--- Global rank: {params.global_rank} - '''
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
    logger.info(PREFIX + "Node ID        : %i" % params.node_id )
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank )
    logger.info(PREFIX + "World size     : %i" % params.world_size )
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node )
    logger.info(PREFIX + "Master         : %s" % str(params.is_master ) )
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node ) )
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu ) )
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed" )
        torch.distributed.init_process_group(
            init_method="env://" , backend="nccl" , )
def UpperCamelCase__ ( args ):
    """Seed the numpy and torch RNGs from ``args.seed`` (and all CUDA devices
    when ``args.n_gpu > 0``) so runs are reproducible.

    The original body read an undefined global ``args`` while its parameter
    went unused; the parameter is renamed to match the body.
    """
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
| 134
| 1
|
A_ = 8.3144598
def __UpperCAmelCase ( temperature, molar_mass )-> float:
    """Return the root-mean-square speed (m/s) of a gas molecule:
    ``vrms = sqrt(3 * R * T / M)`` with T in kelvin and M in kg/mol.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    and the body read an undefined ``UNIVERSAL_GAS_CONSTANT``.  Invalid inputs
    now raise ValueError (a subclass of the bare Exception raised before, so
    existing ``except Exception`` handlers still work).
    """
    UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K); mirrors the module constant
    if temperature < 0:
        raise ValueError('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise ValueError('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example -- the original referenced undefined names
    # (``rms_speed_of_molecule``, ``temperature``, ``molar_mass``, ``vrms``);
    # use the function defined above with explicit locals.
    temperature = 300
    molar_mass = 28
    vrms = __UpperCAmelCase(temperature, molar_mass)
    print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 479
|
import logging
from transformers import PretrainedConfig
A_ = logging.getLogger(__name__)
A_ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class __lowercase ( _A ):
    """Configuration for the BertAbs extractive/abstractive summarisation model
    (encoder and decoder sizes, heads, feed-forward dims and dropouts).

    NOTE(review): this copy is not runnable as-is -- ``__init__`` repeats the
    parameter name ``__lowerCamelCase`` (a SyntaxError in Python) and binds
    every value to a local ``lowercase`` instead of instance attributes
    (``self.vocab_size`` etc.); the original parameter/attribute names must
    be restored before use.
    """

    lowercase = 'bertabs'
    def __init__( self : Dict , __lowerCamelCase : Tuple=3_05_22 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : List[Any]=6 , __lowerCamelCase : Any=5_12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=5_12 , __lowerCamelCase : Tuple=0.2 , __lowerCamelCase : str=6 , __lowerCamelCase : int=7_68 , __lowerCamelCase : int=8 , __lowerCamelCase : List[Any]=20_48 , __lowerCamelCase : Union[str, Any]=0.2 , **__lowerCamelCase : Dict , ) -> Dict:
        '''Store vocabulary size, positional limit and the encoder/decoder
        hyper-parameters, forwarding unknown kwargs to the base config.'''
        super().__init__(**__lowerCamelCase )
        lowercase = vocab_size
        lowercase = max_pos
        lowercase = enc_layers
        lowercase = enc_hidden_size
        lowercase = enc_heads
        lowercase = enc_ff_size
        lowercase = enc_dropout
        lowercase = dec_layers
        lowercase = dec_hidden_size
        lowercase = dec_heads
        lowercase = dec_ff_size
        lowercase = dec_dropout
| 479
| 1
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__A = None
try:
import msvcrt
except ImportError:
__A = None
try:
import fcntl
except ImportError:
__A = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__A = OSError
# Data
# ------------------------------------------------
__A = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__A = """3.0.12"""
# Module-level cache so repeated calls reuse one logger instance.
_logger = None


def __A() -> Dict:
    """Return the shared module logger, creating it lazily on first call.

    The original body assigned the logger to a throwaway local and returned
    the global ``_logger``, which was never initialized (NameError) nor
    updated; the cache is now seeded with ``None`` and filled on first use.
    """
    global _logger
    if _logger is None:
        _logger = logging.getLogger(__name__)
    return _logger
class _lowerCAmelCase(TimeoutError):
    """Raised when a file lock could not be acquired before the timeout.

    Fixes in this block: the base class was the undefined name ``a``
    (restored to ``TimeoutError``, matching the backward-compatibility shim
    above), and ``__init__`` read the unbound name ``lock_file`` instead of
    its parameter, so ``__str__`` could never report the path.
    """

    def __init__(self, __UpperCAmelCase):
        # Remember which lock file timed out so __str__ can report it.
        self.lock_file = __UpperCAmelCase
        return None

    def __str__(self):
        # Same message text as the original.
        return F"The file lock '{self.lock_file}' could not be acquired."
class _lowerCAmelCase:
    """Context-manager proxy returned by a lock's ``acquire()``.

    Entering yields the wrapped lock object; leaving releases it, so
    ``with lock.acquire():`` works even though ``acquire`` already locked.

    Fixes in this block: ``__init__`` read the unbound name ``lock`` instead
    of its parameter, and ``__exit__`` repeated one parameter name three
    times, which is a SyntaxError.
    """

    def __init__(self, __UpperCAmelCase):
        # Keep a reference to the real lock for __enter__/__exit__.
        self.lock = __UpperCAmelCase
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release, regardless of whether the body raised.
        self.lock.release()
        return None
class _lowerCAmelCase :
    """Base class for platform-specific file locks (re-entrant, timeout-aware).

    NOTE(review): identifiers look machine-mangled throughout this class:
    ``__init__`` and ``__exit__`` repeat parameter names (SyntaxErrors),
    instance attributes are bound to the throwaway local ``lowerCAmelCase__``
    while bodies read unbound names such as ``max_filename_length``,
    ``lock_file``, ``timeout``, ``force`` and ``path``, every property/method
    is named ``snake_case`` (each definition shadowing the previous), and
    ``@timeout.setter`` refers to a property that no longer exists under that
    name. The structure mirrors ``filelock.BaseFileLock``; restore the
    original names before relying on it.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
        '''Remember the lock-file path, default timeout and nesting counter.'''
        # 255 is the usual filesystem limit for a single path component.
        lowerCAmelCase__ :Union[str, Any] = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long
        lowerCAmelCase__ :int = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase )
        # The path to the lock file.
        lowerCAmelCase__ :Optional[int] = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        lowerCAmelCase__ :Any = None
        # The default timeout value.
        lowerCAmelCase__ :Optional[int] = timeout
        # We use this lock primarily for the lock counter.
        lowerCAmelCase__ :Optional[Any] = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        lowerCAmelCase__ :List[Any] = 0
        return None

    @property
    def snake_case ( self ):
        '''Path of the file used for locking.'''
        return self._lock_file

    @property
    def snake_case ( self ):
        '''Current default timeout in seconds (-1 means wait forever).'''
        return self._timeout

    @timeout.setter
    def snake_case ( self , __UpperCAmelCase ):
        '''Coerce the new default timeout to float and store it.'''
        lowerCAmelCase__ :Any = float(__UpperCAmelCase )
        return None

    def snake_case ( self ):
        '''Platform hook: actually take the OS-level lock.'''
        raise NotImplementedError()

    def snake_case ( self ):
        '''Platform hook: actually release the OS-level lock.'''
        raise NotImplementedError()

    @property
    def snake_case ( self ):
        '''True while this object currently holds the OS-level lock.'''
        return self._lock_file_fd is not None

    def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ):
        '''Acquire the lock, polling until success or until the timeout expires.

        Returns a proxy usable as a context manager so callers can write
        ``with lock.acquire():``; raises ``Timeout`` when the deadline passes.
        '''
        if timeout is None:
            lowerCAmelCase__ :Dict = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lowerCAmelCase__ :Dict = id(self )
        lowerCAmelCase__ :List[str] = self._lock_file
        lowerCAmelCase__ :str = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(__UpperCAmelCase )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                lowerCAmelCase__ :str = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def snake_case ( self , __UpperCAmelCase=False ):
        '''Release one nesting level; drop the OS lock when it reaches zero (or on force).'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lowerCAmelCase__ :List[str] = id(self )
                    lowerCAmelCase__ :int = self._lock_file
                    logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    lowerCAmelCase__ :List[Any] = 0
                    logger().debug(F"Lock {lock_id} released on {lock_filename}" )
        return None

    def __enter__( self ):
        '''Support ``with lock:`` by acquiring with the default timeout.'''
        self.acquire()
        return self

    def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''Release on leaving a ``with`` block.'''
        self.release()
        return None

    def __del__( self ):
        '''Best effort: force-release when the lock object is garbage-collected.'''
        self.release(force=__UpperCAmelCase )
        return None

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''Shorten a path by hashing its basename when it exceeds the max length.'''
        lowerCAmelCase__ :List[Any] = os.path.basename(__UpperCAmelCase )
        if len(__UpperCAmelCase ) > max_length and max_length > 0:
            lowerCAmelCase__ :List[str] = os.path.dirname(__UpperCAmelCase )
            lowerCAmelCase__ :int = str(hash(__UpperCAmelCase ) )
            # Keep a readable prefix, then "..." + hash + ".lock".
            lowerCAmelCase__ :List[Any] = filename[: max_length - len(__UpperCAmelCase ) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(__UpperCAmelCase , __UpperCAmelCase )
        else:
            return path
class _lowerCAmelCase ( a ):
    """Windows file lock based on ``msvcrt.locking``.

    NOTE(review): the base class ``a`` is undefined in this file (originally
    the base file-lock class), ``__init__`` repeats its parameter name
    (SyntaxError), and the method bodies read the unbound names
    ``__UpperCAmelCase``/``fd`` where distinct locals were collapsed.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
        '''Resolve the lock path to an extended-length Windows path.'''
        from .file_utils import relative_to_absolute_path

        super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )
        # The extended-length prefix enables paths longer than MAX_PATH.
        lowerCAmelCase__ :Any = '\\\\?\\' + relative_to_absolute_path(self.lock_file )

    def snake_case ( self ):
        '''Acquire: open/create the lock file and lock one byte, non-blocking.'''
        lowerCAmelCase__ :List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            lowerCAmelCase__ :List[str] = os.open(self._lock_file , __UpperCAmelCase )
        except OSError:
            # Could not even open the lock file; stay unlocked.
            pass
        else:
            try:
                msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 )
            except OSError:
                # Another process holds the lock; close our descriptor again.
                os.close(__UpperCAmelCase )
            else:
                lowerCAmelCase__ :Any = fd
        return None

    def snake_case ( self ):
        '''Release: unlock the byte, close the fd and try to delete the file.'''
        lowerCAmelCase__ :List[Any] = self._lock_file_fd
        lowerCAmelCase__ :int = None
        msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 )
        os.close(__UpperCAmelCase )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class _lowerCAmelCase ( a ):
    """Unix file lock based on ``fcntl.flock``.

    NOTE(review): the base class ``a`` is undefined (originally the base
    file-lock class), both methods share the mangled name ``snake_case`` (the
    second shadows the first), and the bodies read the unbound names
    ``__UpperCAmelCase``/``fd`` where distinct locals were collapsed.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
        '''Clamp the maximum filename length to the filesystem's NAME_MAX.'''
        lowerCAmelCase__ :Optional[int] = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax
        super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )

    def snake_case ( self ):
        '''Acquire: open the lock file and take an exclusive, non-blocking flock.'''
        lowerCAmelCase__ :Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        lowerCAmelCase__ :Optional[Any] = os.open(self._lock_file , __UpperCAmelCase )
        try:
            fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            # Lock is held elsewhere; close the descriptor we just opened.
            os.close(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[Any] = fd
        return None

    def snake_case ( self ):
        '''Release: unlock and close the descriptor, keeping the file on disk.'''
        lowerCAmelCase__ :Union[str, Any] = self._lock_file_fd
        lowerCAmelCase__ :Optional[Any] = None
        fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN )
        os.close(__UpperCAmelCase )
        return None
class _lowerCAmelCase ( a ):
    """Portable fallback lock: existence of the lock file *is* the lock.

    NOTE(review): the base class ``a`` is undefined, and the acquire body
    assigns the descriptor to a collapsed local while reading the unbound
    names ``__UpperCAmelCase``/``fd``.
    """

    def snake_case ( self ):
        '''Acquire: atomically create the lock file (O_EXCL); fail if it exists.'''
        lowerCAmelCase__ :List[str] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            lowerCAmelCase__ :int = os.open(self._lock_file , __UpperCAmelCase )
        except OSError:
            # File already exists -> somebody else holds the lock.
            pass
        else:
            lowerCAmelCase__ :Optional[Any] = fd
        return None

    def snake_case ( self ):
        '''Release: close the descriptor and delete the lock file.'''
        os.close(self._lock_file_fd )
        lowerCAmelCase__ :Union[str, Any] = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Choose the platform-appropriate lock implementation:
# msvcrt -> Windows, fcntl -> Unix, otherwise the portable soft lock.
# NOTE(review): the class names referenced below (WindowsFileLock,
# UnixFileLock, SoftFileLock) are not defined in this file — the classes
# above were all renamed to ``_lowerCAmelCase`` — so this dispatch would
# raise NameError as written.
__A = None
if msvcrt:
    __A = WindowsFileLock
elif fcntl:
    __A = UnixFileLock
else:
    __A = SoftFileLock
    if warnings is not None:
        # Soft locks are not robust against crashed processes; warn once.
        warnings.warn("""only soft file lock is available""")
| 93
|
import os
import sys
import unittest
lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A ( unittest.TestCase ):
    """Unit tests for the mapping helpers in ``utils/get_test_info.py``.

    NOTE(review): identifiers are machine-mangled — the module-level paths of
    the Bert and Blip test files were both assigned to the single name
    ``lowerCAmelCase`` (the second assignment overwrites the first), and every
    local is rebound to ``__lowercase``, so the expected-mapping dicts built
    below are constructed and then discarded instead of being compared
    against the computed mappings.
    """

    def _A (self ):
        '''Check the test-class -> tester-class mapping for both test files.'''
        __lowercase= get_test_to_tester_mapping(lowerCAmelCase )
        __lowercase= get_test_to_tester_mapping(lowerCAmelCase )
        # Expected mapping for the Bert test file.
        __lowercase= {'BertModelTest': 'BertModelTester'}
        # Expected mapping for the Blip test file.
        __lowercase= {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )

    def _A (self ):
        '''Check the model-class -> test-class mapping for both test files.'''
        __lowercase= get_model_to_test_mapping(lowerCAmelCase )
        __lowercase= get_model_to_test_mapping(lowerCAmelCase )
        # Expected mapping for the Bert test file.
        __lowercase= {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        # Expected mapping for the Blip test file.
        __lowercase= {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )

    def _A (self ):
        '''Check the model-class -> tester-class mapping for both test files.'''
        __lowercase= get_model_to_tester_mapping(lowerCAmelCase )
        __lowercase= get_model_to_tester_mapping(lowerCAmelCase )
        # Expected mapping for the Bert test file.
        __lowercase= {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        # Expected mapping for the Blip test file.
        __lowercase= {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
        self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
| 230
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase__ ( UpperCAmelCase ) -> Optional[int]:
    """Return the Parquet writer batch size to use for the given features.

    Visits every feature and picks the smallest configured row-group size
    among the "heavy" feature types present (image, audio, raw binary);
    returns ``None`` when none occur so the writer default applies.

    Fixes in this block: the accumulator was assigned to a throwaway local,
    making ``nonlocal batch_size`` a SyntaxError, and the type checks read
    ``isinstance(feature, feature)``; the intended names are restored.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        # Shrink the accumulator each time a heavy feature type is seen.
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(UpperCAmelCase, set_batch_size)
    return None if batch_size is np.inf else batch_size
class UpperCamelCase_ ( UpperCamelCase ):
    """Dataset reader that loads Parquet files via the packaged builder.

    NOTE(review): the base name ``UpperCamelCase`` is undefined in this file
    (originally the abstract dataset reader), and ``__init__`` repeats the
    parameter name ``lowercase`` many times, which is a SyntaxError; the body
    also reads the original names (``path_or_paths``, ``dataset``) that are
    never bound because the locals were collapsed onto ``_a``.
    """

    def __init__( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> Any:
        '''Configure the underlying Parquet builder for the given files.'''
        super().__init__(
            lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
        # Normalize a bare path into a {split: path} mapping.
        _a : List[str] = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
        # Hash of the packaged parquet module, used for caching.
        _a : List[str] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        _a : List[str] = Parquet(
            cache_dir=lowercase , data_files=lowercase , features=lowercase , hash=lowercase , **lowercase , )

    def snake_case__( self ) -> Union[str, Any]:
        '''Materialize the dataset (streaming or map-style) and return it.'''
        # Build iterable dataset
        if self.streaming:
            _a : str = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            _a : List[Any] = None
            _a : Union[str, Any] = None
            _a : Any = None
            _a : Dict = None
            self.builder.download_and_prepare(
                download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
            _a : Any = self.builder.as_dataset(
                split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
        return dataset
class UpperCamelCase_ :
    """Streams a Dataset into a Parquet file or binary buffer.

    NOTE(review): ``__init__`` repeats the parameter name ``lowercase``
    (SyntaxError), attributes are bound to the throwaway local ``_a`` while
    later code reads ``self.dataset``/``self.path_or_buf``/..., and
    ``get_writer_batch_size`` is not defined under that name in this file.
    """

    def __init__( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[int]:
        '''Remember the dataset, destination and writer options.'''
        _a : List[Any] = dataset
        _a : int = path_or_buf
        # Fall back to a feature-dependent row-group size when none is given.
        _a : Any = batch_size or get_writer_batch_size(dataset.features )
        _a : str = parquet_writer_kwargs

    def snake_case__( self ) -> int:
        '''Write the whole dataset; returns the number of bytes written.'''
        _a : List[Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Path-like destination: open the file ourselves.
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                _a : List[str] = self._write(file_obj=lowercase , batch_size=lowercase , **self.parquet_writer_kwargs )
        else:
            # Destination is already a binary buffer.
            _a : Optional[Any] = self._write(file_obj=self.path_or_buf , batch_size=lowercase , **self.parquet_writer_kwargs )
        return written

    def snake_case__( self , lowercase , lowercase , **lowercase ) -> int:
        '''Low-level writer: stream record batches into a pyarrow ParquetWriter.'''
        _a : int = 0
        # Guard against the destination being smuggled in via kwargs.
        _a : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase )
        _a : Union[str, Any] = self.dataset.features.arrow_schema
        _a : List[Any] = pq.ParquetWriter(lowercase , schema=lowercase , **lowercase )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            # Slice the underlying Arrow table for this batch.
            _a : List[Any] = query_table(
                table=self.dataset._data , key=slice(lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(lowercase )
            written += batch.nbytes
        writer.close()
        return written
| 721
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def UpperCamelCase__ ( config , base_model=False ):
    """Build the (old_name, new_name) pairs mapping timm DeiT weights to HF names.

    Args:
        config: model config providing ``num_hidden_layers``.
        base_model: when True, target a headless backbone (pooler keys are
            added and the leading ``deit`` prefix is stripped); when False,
            classification-head keys are included.

    Fixes in this block: the original signature repeated one parameter name
    (a SyntaxError) and bound the list to a throwaway local while the body
    appended to the unbound name ``rename_keys``; proper names are restored.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias'))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''deit.embeddings.cls_token'''),
            ('''dist_token''', '''deit.embeddings.distillation_token'''),
            ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''deit.embeddings.position_embeddings'''),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ] )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ('''norm.weight''', '''deit.layernorm.weight'''),
                ('''norm.bias''', '''deit.layernorm.bias'''),
                ('''head.weight''', '''cls_classifier.weight'''),
                ('''head.bias''', '''cls_classifier.bias'''),
                ('''head_dist.weight''', '''distillation_classifier.weight'''),
                ('''head_dist.bias''', '''distillation_classifier.bias'''),
            ] )
    return rename_keys
def UpperCamelCase__ ( state_dict , config , base_model=False ) -> None:
    """Split each timm fused qkv projection into separate HF q/k/v entries.

    Pops ``blocks.{i}.attn.qkv.{weight,bias}`` from ``state_dict`` and writes
    the query/key/value slices back under the HF key names, in place.

    Fixes in this block: the original signature repeated one parameter name
    (a SyntaxError) and assigned every slice to a throwaway local instead of
    writing it into ``state_dict``; the writes are restored.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__ ( dct , old , new ) -> None:
    """Rename a state-dict entry in place: move ``dct[old]`` to ``dct[new]``.

    Fixes in this block: the original signature repeated one parameter name
    three times (a SyntaxError) and the popped value was assigned to a
    throwaway local instead of being stored under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def UpperCamelCase__ ( ):
    """Download the standard COCO cats image used to sanity-check vision models.

    Fixes in this block: the URL was bound to a throwaway local while the
    unbound name ``UpperCAmelCase`` was passed to ``requests.get`` (a
    NameError at call time); the URL local is now actually used.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True lets PIL read straight from the HTTP response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
    """Convert a timm DeiT checkpoint to HF format, verify logits, and save it.

    NOTE(review): the signature repeats ``UpperCAmelCase`` (a SyntaxError;
    originally ``(deit_name, pytorch_dump_folder_path)``) and all locals were
    collapsed onto ``_a``, so later reads of ``idalabel``, ``deit_name``,
    ``timm_model``, ``rename_keys``, ``model`` etc. are unbound. The helpers
    it calls (``create_rename_keys``, ``rename_key``, ``read_in_q_k_v``,
    ``prepare_img``) are also not defined under those names in this file.
    Kept verbatim for reference.
    """
    _a : str = DeiTConfig()
    # all deit models have fine-tuned heads
    _a : Tuple = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    _a : List[str] = 1000
    _a : Tuple = '''huggingface/label-files'''
    _a : Tuple = '''imagenet-1k-id2label.json'''
    _a : Union[str, Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
    # Map label ids (JSON keys are strings) to class names, and the inverse.
    _a : List[str] = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
    _a : List[Any] = idalabel
    _a : Any = {v: k for k, v in idalabel.items()}
    # patch_size and image_size are encoded in the timm model name.
    _a : List[str] = int(deit_name[-6:-4] )
    _a : str = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        _a : List[Any] = 192
        _a : Optional[Any] = 768
        _a : Optional[int] = 12
        _a : Union[str, Any] = 3
    elif deit_name[9:].startswith('''small''' ):
        _a : List[Any] = 384
        _a : Tuple = 1536
        _a : List[Any] = 12
        _a : int = 6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        _a : List[str] = 1024
        _a : Dict = 4096
        _a : List[Any] = 24
        _a : Dict = 16
    # load original model from timm
    _a : int = timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    _a : List[Any] = timm_model.state_dict()
    _a : Any = create_rename_keys(UpperCAmelCase , UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    # load HuggingFace model
    _a : str = DeiTForImageClassificationWithTeacher(UpperCAmelCase ).eval()
    model.load_state_dict(UpperCAmelCase )
    # Check outputs on an image, prepared by DeiTImageProcessor
    _a : str = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    _a : Optional[Any] = DeiTImageProcessor(size=UpperCAmelCase , crop_size=config.image_size )
    _a : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
    _a : int = encoding['''pixel_values''']
    _a : List[Any] = model(UpperCAmelCase )
    _a : Tuple = timm_model(UpperCAmelCase )
    # The conversion is only accepted when HF and timm logits agree closely.
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(UpperCAmelCase , outputs.logits , atol=1e-3 )
    Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
    print(F'Saving model {deit_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(UpperCAmelCase )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    __lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    # NOTE(review): the parser is bound to ``__lowerCamelCase`` above, so the
    # ``parser.add_argument(...)``/``parser.parse_args()`` calls below read an
    # unbound name, as do ``args`` and ``convert_deit_checkpoint`` (the
    # conversion function in this file is named ``UpperCamelCase__``).
    parser.add_argument(
        '--deit_name',
        default='vit_deit_base_distilled_patch16_224',
        type=str,
        help='Name of the DeiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    __lowerCamelCase = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the PoolFormer sub-package.
# NOTE(review): in the standard pattern the dict below is named
# ``_import_structure`` and the runtime branch assigns the lazy module to
# ``sys.modules[__name__]``; here every name was collapsed to ``_A``, so the
# import structure is clobbered by the later list assignments and the
# ``_import_structure`` reference at the bottom is unbound.
_A = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}

# Vision-dependent entries are only registered when torchvision/PIL exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ['''PoolFormerFeatureExtractor''']
    _A = ['''PoolFormerImageProcessor''']

# Torch-dependent modeling entries.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]

# At type-checking time import the real symbols; at runtime install a lazy module.
if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    _A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 431
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A = 16
_A = 32
def __UpperCamelCase ( _A , _A = 16 ):
    """Build tokenized GLUE/MRPC train and eval dataloaders.

    NOTE(review): the parameter name ``_A`` is repeated (a SyntaxError;
    originally ``(accelerator, batch_size=16)``), and all locals were
    collapsed onto ``lowerCAmelCase_``, so the later reads of ``tokenizer``,
    ``datasets``, ``tokenized_datasets``, ``examples`` and ``outputs`` are
    unbound. Kept verbatim for reference.
    """
    lowerCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(_A ):
        # max_length=None => use the model max length (it's actually the default)
        lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_A , max_length=_A )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        lowerCAmelCase_ = datasets.map(
            _A , batched=_A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowerCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(_A ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            lowerCAmelCase_ = 16
        elif accelerator.mixed_precision != "no":
            lowerCAmelCase_ = 8
        else:
            lowerCAmelCase_ = None
        return tokenizer.pad(
            _A , padding='''longest''' , max_length=_A , pad_to_multiple_of=_A , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    lowerCAmelCase_ = DataLoader(
        tokenized_datasets['''train'''] , shuffle=_A , collate_fn=_A , batch_size=_A )
    lowerCAmelCase_ = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=_A , collate_fn=_A , batch_size=_A )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Replace the real dataloader factory with the mocked one in CI.
    _A = mocked_dataloaders  # noqa: F811
def __UpperCamelCase ( _A , _A ):
    """Train and evaluate bert-base-cased on MRPC with LocalSGD synchronization.

    NOTE(review): the parameter name ``_A`` is repeated (a SyntaxError;
    originally ``(config, args)``) and all locals were collapsed onto
    ``lowerCAmelCase_``, so the later reads of ``args``, ``config``,
    ``accelerator``, ``model``, ``optimizer``, ``lr_scheduler``, ``metric``
    etc. are unbound. Kept verbatim for reference.
    """
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _A ) == "1":
        lowerCAmelCase_ = 2
    # New Code #
    lowerCAmelCase_ = int(args.gradient_accumulation_steps )
    lowerCAmelCase_ = int(args.local_sgd_steps )
    # Initialize accelerator
    lowerCAmelCase_ = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_A )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowerCAmelCase_ = config['''lr''']
    lowerCAmelCase_ = int(config['''num_epochs'''] )
    lowerCAmelCase_ = int(config['''seed'''] )
    lowerCAmelCase_ = int(config['''batch_size'''] )
    lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
    set_seed(_A )
    lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(_A , _A )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_A )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    lowerCAmelCase_ = model.to(accelerator.device )
    # Instantiate optimizer
    lowerCAmelCase_ = AdamW(params=model.parameters() , lr=_A )
    # Instantiate scheduler
    lowerCAmelCase_ = get_linear_schedule_with_warmup(
        optimizer=_A , num_warmup_steps=100 , num_training_steps=(len(_A ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
        _A , _A , _A , _A , _A )
    # Now we train the model
    for epoch in range(_A ):
        model.train()
        with LocalSGD(
            accelerator=_A , model=_A , local_sgd_steps=_A , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(_A ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(_A ):
                    lowerCAmelCase_ = model(**_A )
                    lowerCAmelCase_ = output.loss
                    accelerator.backward(_A )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
        model.eval()
        for step, batch in enumerate(_A ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                lowerCAmelCase_ = model(**_A )
            lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
            lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=_A , references=_A , )
        lowerCAmelCase_ = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , _A )
def __UpperCamelCase ( ):
    """Parse command-line arguments and launch training.

    NOTE(review): the parser is bound to the collapsed local
    ``lowerCAmelCase_``, so every later ``parser.*`` read is unbound, and the
    final call targets ``training_function`` which is not defined under that
    name in this file (nor is ``main`` below). Kept verbatim for reference.
    """
    lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
    lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(_A , _A )


if __name__ == "__main__":
    main()
| 431
| 1
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( params , i , prefix ):
    """Return the relative-position-bias embedding table for layer ``i``.

    Fixes in this block: the original signature repeated the parameter name
    ``lowerCamelCase__`` three times (a SyntaxError) while the body already
    used ``params``/``i``/``prefix``; distinct names are restored.
    """
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of layer `i`, each flattened to 2-D.

    Args:
        params: flat dict of checkpoint arrays keyed by "/"-joined paths.
        i: layer index (selected on axis 1 of each kernel).
        prefix: stack name, "encoder" or "decoder".
        layer_name: attention sub-module name within the layer.

    Does not transpose.
    """
    # Restored obfuscated names; the body referenced `params`/`prefix`/`layer_name`/`i`
    # which were not bound by the original (duplicate-named) parameters.
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    # `out` projection flattens the leading (heads, head_dim) axes instead.
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of layer `i`. Does not transpose.

    Args:
        params: flat dict of checkpoint arrays keyed by "/"-joined paths.
        i: layer index (selected on axis 1).
        prefix: stack name, "encoder" or "decoder".
        split_mlp_wi: True for v1.1-style gated GeLU checkpoints, where the
            input projection is split into wi_0/wi_1; then `wi` is a 2-tuple.
    """
    # Restored obfuscated parameter/local names (see sibling lookup helpers).
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of layer `i` for the given sub-layer name.

    Args:
        params: flat dict of checkpoint arrays keyed by "/"-joined paths.
        i: layer index (selected on axis 1).
        prefix: stack name, "encoder" or "decoder".
        layer_name: e.g. "pre_attention_layer_norm".
    """
    # Restored obfuscated parameter names (body used `params`/`prefix`/`layer_name`/`i`).
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Convert a loaded T5X (flax) checkpoint tree into an ordered dict of
    numpy arrays keyed by HuggingFace T5/UMT5 parameter names.

    Args:
        variables: checkpoint dict as loaded from T5X (expects a "target" entry).
        num_layers: number of encoder (and decoder) blocks to convert.
        is_encoder_only: if True, skip the decoder stack and LM head.
        scalable_attention: if True (umt5), every layer carries its own relative
            position bias; otherwise only block 0 does.

    NOTE(review): the incoming code had lost every assignment target in this
    function; the HF parameter-key strings below were reconstructed from the
    upstream T5X->PyTorch conversion layout — verify against a reference run.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # Only the first block carries the shared relative position bias.
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Build a PyTorch state dict (str -> torch.Tensor) from converted numpy params.

    Missing embedding/LM-head entries are filled from the shared embedding table.

    Args:
        converted_params: mapping of HF parameter name -> numpy array.
        is_encoder_only: if True, only the encoder embedding is back-filled.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing. (Restored: the obfuscated code dropped the
    # `state_dict[...] = ...` assignment targets, so nothing was ever added.)
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load T5X checkpoint weights into the given PyTorch model in place.

    Args:
        model: target PyTorch model (UMT5 encoder or seq2seq model).
        config: model config providing `num_layers`.
        tax_checkpoint_path: path to the T5X checkpoint directory.
        is_encoder_only: whether to skip the decoder stack.
        scalable_attention: whether every layer has its own relative position bias.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True: every model parameter must be covered by the checkpoint.
    # NOTE(review): the obfuscated code passed a mangled placeholder for `strict`;
    # True matches the upstream conversion script — confirm.
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """End-to-end conversion: T5X checkpoint -> PyTorch model saved to disk.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the converted model.
        is_encoder_only: build an encoder-only model instead of seq2seq.
        scalable_attention: umt5-style per-layer relative attention bias.
    """
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    parser.add_argument(
        '--scalable_attention',
        action='store_true',
        help='Whether the model uses scaled attention (umt5 model)',
        default=False,
    )
    args = parser.parse_args()
    # BUG FIX: argparse exposes `--t5x_checkpoint_path` as `args.t5x_checkpoint_path`;
    # the previous code read the non-existent attribute `args.tax_checkpoint_path`.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 244
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """Compute the KMP prefix function of `input_string`.

    Result[i] is the length of the longest proper prefix of
    input_string[: i + 1] that is also a suffix of it.

    Restored: the obfuscated code assigned `prefix_result`/`j` updates to a
    throwaway name, and the function was renamed so the caller below could
    not find `prefix_function`.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Return the maximum of the prefix function, i.e. the length of the
    longest proper prefix of `input_str` that also occurs as a suffix of
    some prefix of the string.

    Returns 0 for the empty string (instead of `max()` raising ValueError).
    NOTE(review): restored the conventional name from a mangled placeholder;
    nothing in this file referenced the old name.
    """
    return max(prefix_function(input_str), default=0)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 244
| 1
|
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings
    (the number of positions at which the characters differ).

    Raises:
        ValueError: if the strings differ in length.
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    # BUG FIX: the original bound both zipped characters to the same name
    # (`for chara, chara in zip(...)`), so the comparison was always False
    # and the function always returned 0.
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 8
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; restored name — the config class below calls
# `logger.info`/`logger.warning`, but the original assignment target was mangled.
logger = logging.get_logger(__name__)

# TODO Update this
# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): restored the conventional archive-map name from a mangled
# placeholder; nothing visible in this file references it — confirm downstream.
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models.

    Restored from an obfuscated copy in which every `self.<attr> = ...`
    assignment target and every parameter name was mangled; parameter names
    and defaults follow the upstream ESM configuration. For folding models
    an `EsmFoldConfig` and a vocabulary list are attached.
    """

    model_type = 'esm'

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are handled by the base config.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested folding config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Configuration for the ESMFold head.

    Restored from an obfuscated copy in which every dataclass field name was
    mangled to one placeholder (so only a single field survived); field names
    were recovered from the upstream ESMFold configuration, defaults kept as-is.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Normalize `trunk` into a TrunkConfig (default, or built from a dict).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration for the ESMFold trunk.

    Restored from an obfuscated copy with mangled field names; names recovered
    from the upstream ESMFold trunk configuration, defaults kept as-is.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Normalize `structure_module` into a StructureModuleConfig.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # BUG FIX: divisibility was previously checked against the dim itself
        # (`x % x`, always 0); the intended check is against the head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration for the ESMFold structure module (IPA head).

    Restored from an obfuscated copy with mangled field names; names recovered
    from the upstream ESMFold structure-module configuration, defaults kept as-is.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary as a tuple of 33 tokens:
    4 special tokens, 25 amino-acid/ambiguity codes, '.', '-', and two
    trailing special tokens.

    Restored name: EsmConfig calls `get_default_vocab_list()`, but the
    function had been renamed to a mangled placeholder.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 215
| 0
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Lazy-import structure: maps submodule name -> public names it provides.
# BUG FIX: this dict was assigned to a mangled placeholder while `_LazyModule`
# below is passed `_import_structure`, which was therefore undefined.
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    # Restored to match the structure above (module/class names were mangled).
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): these three flag strings were originally assigned to throwaway
# names; in the upstream benchmark they are almost certainly `os.environ[...]`
# settings for the TensorRT execution provider (INT8 / engine-cache toggles).
# The exact environment-variable names were lost — restore from the source script.
ORT_FLAG_A = '1'
ORT_FLAG_B = '0'
ORT_FLAG_C = '1'

sess_opt = ort.SessionOptions()
# Disable graph optimizations so the benchmark measures the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Fixed benchmark shape; integer token inputs as expected by BERT-style exports.
# NOTE(review): the two scalars below lost their names; `sequence=128, batch=1`
# matches the (batch, sequence) shapes used here — confirm. The original dtype
# token was mangled ("intaa"); int64 is the usual dtype for such inputs — confirm.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print('Start inference...')
start_time = time.time()
max_iters = 2000
# (a dead `= {}` assignment from the original was removed)
for _ in range(max_iters):
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 264
| 0
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# Restored names: all three doc constants were assigned to the same name `A`
# (each clobbering the last), while the metric class below reads
# `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`.
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
    title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
    author = \"Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John\",
    booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
    month = aug # \" 8-12\",
    year = \"2006\",
    address = \"Cambridge, Massachusetts, USA\",
    publisher = \"Association for Machine Translation in the Americas\",
    url = \"https://aclanthology.org/2006.amta-papers.25\",
    pages = \"223--231\",
}
@inproceedings{post-2018-call,
    title = \"A Call for Clarity in Reporting {BLEU} Scores\",
    author = \"Post, Matt\",
    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
    month = oct,
    year = \"2018\",
    address = \"Belgium, Brussels\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W18-6319\",
    pages = \"186--191\",
}
'''

_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
    \'num_edits\' (int): The cumulative number of edits
    \'ref_length\' (float): The cumulative average reference length
Examples:
    Example 1:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\",
        ...                     \"What did the TER metric user say to the developer?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
        ...             [\"Your jokes are...\", \"...TERrible\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
    Example 2:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
    Example 3:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
    Example 4:
        >>> predictions = [\"does this sentence match??\",
        ...                     \"what about this sentence?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
    Example 5:
        >>> predictions = [\"does this sentence match??\",
        ...                    \"what about this sentence?\",
        ...                    \"What did the TER metric user say to the developer?\"]
        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
        ...             [\"Your jokes are...\", \"...TERrible\"]]
        >>> ter = datasets.load_metric(\"ter\")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    """TER (Translation Edit Rate) metric, backed by sacrebleu's TER implementation."""

    # Restored method names: `datasets.Metric` dispatches to `_info`/`_compute`;
    # the obfuscated copy gave both methods the same mangled name (the second
    # clobbered the first) and destroyed the `_compute` parameter names.
    def _info(self):
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # All predictions must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # Transpose to sacrebleu's expected layout: one stream per reference slot.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 125
|
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below `n` using a sieve over odd numbers.

    Requires n >= 3 (indices 0..2 are initialised explicitly).
    Restored: the obfuscated copy assigned every sieve update to a throwaway
    name and appended the wrong value; this is the standard odd-only sieve
    the surviving control flow clearly implements.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # Cross off every multiple of i, starting at 2*i.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: return the sum of all semidivisible numbers <= `limit`.

    For each pair of consecutive primes (lps, ups) with lps**2 <= n < ups**2,
    adds numbers divisible by exactly one of the pair and removes those
    divisible by both. Restored: the obfuscated copy assigned every working
    variable (`primes`, `last_prime`, `current`, bounds, ...) to a throwaway
    name; names recovered from the references that survived.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Print the Project Euler answer when run as a script
    # (slow at the full default limit).
    print(solution())
| 652
| 0
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
# Hugging Face Hub client, used below to enumerate diffusers checkpoints.
lowerCamelCase_ : Dict = HfApi()
# NOTE(review): this dict is read later as `results[...]`, but the assignment
# targets of the expected-value tensor table below were mangled in transit, so
# it stays empty — restore the original `results["..."] = torch.tensor(...)`
# keys from the source script before relying on this file.
lowerCamelCase_ : str = {}
# fmt: off
lowerCamelCase_ : Tuple = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowerCamelCase_ : List[str] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowerCamelCase_ : Dict = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowerCamelCase_ : str = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowerCamelCase_ : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowerCamelCase_ : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowerCamelCase_ : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowerCamelCase_ : int = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowerCamelCase_ : Any = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowerCamelCase_ : List[str] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowerCamelCase_ : str = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowerCamelCase_ : Optional[Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowerCamelCase_ : Optional[int] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
# Restored names: `models`, `local_checkpoint`, `model`, `noise`, `time_step`
# and `logits` were referenced below but assigned to a throwaway placeholder.
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"""Started running {mod.modelId}!!!""")

        if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)

        # Deterministic input so outputs are comparable against the stored table.
        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        # NOTE(review): `results` is the expected-value table defined above this
        # loop; its keys were lost in transit and must be restored before this
        # comparison can succeed.
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 345
|
import os


def __lowercase(grid=None) -> int:
    """Return the greatest product of four adjacent numbers in a grid.

    Adjacency is horizontal, vertical, or along either diagonal
    (Project Euler problem 11).

    Args:
        grid: optional square matrix of ints. When omitted, a 20x20 grid is
            read from ``grid.txt`` next to this file (one whitespace-separated
            row per line).

    Returns:
        The maximum product of four consecutive grid entries.
    """
    if grid is None:
        # BUG FIX: the original passed an undefined name to os.path.dirname;
        # the file is located relative to this module, i.e. __file__.
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "grid.txt")
        with open(path) as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]

    n = len(grid)  # generalized: the original hard-coded 20 / 17 (= 20 - 3)
    maximum = 0
    # right (horizontal runs of four)
    for i in range(n):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            maximum = max(maximum, temp)
    # down (vertical runs of four)
    for i in range(n - 3):
        for j in range(n):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            maximum = max(maximum, temp)
    # diagonal down-right
    for i in range(n - 3):
        for j in range(n - 3):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            maximum = max(maximum, temp)
    # diagonal down-left
    for i in range(n - 3):
        for j in range(3, n):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            maximum = max(maximum, temp)
    return maximum


# BUG FIX: the __main__ guard called an undefined ``solution``; expose the
# implementation under that name so both spellings work.
solution = __lowercase


if __name__ == "__main__":
    print(solution())
| 345
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
# Flax-only imports and JAX memory configuration for the Pegasus tests.
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): "platform" is bound to a throwaway name here; presumably the
    # original set os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" — verify
    # upstream, otherwise the `import os` above is dead code.
    __UpperCamelCase : str = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds a tiny Pegasus config plus random encoder/decoder inputs for tests.

    NOTE(review): restored from obfuscated code whose `__init__` had every
    parameter named identically (a SyntaxError) and whose locals all shared one
    name. Parameter names are recovered from the body's assignment order and
    the call sites in the companion test class (which also references this
    class as ``FlaxPegasusModelTester``).
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"  # presumably the activation override — TODO confirm name

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for a tiny Pegasus model."""
        # Token ids in [3, vocab_size) with a forced EOS as the final position.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with init_cache must match full decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        # Decode everything but the last token, caching the key/values.
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        # Decode just the final token using the cache.
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask out to the cache length with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Build the standard encoder/decoder input dict for the Pegasus tests.

    BUG FIX: the obfuscated original declared five parameters that all shared
    one name (a SyntaxError) while the body read ``config``/``input_ids``/
    ``decoder_input_ids``/``attention_mask``/``decoder_attention_mask``; names
    are restored to match the body and the 3-argument call site. ``np.inta``
    (nonexistent) is restored to ``np.int8``.

    Masks default to "attend everywhere except padding"; the first decoder
    position is always attended (it holds the decoder_start token, which may
    equal pad_token_id).
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


# Backward-compatible alias for the obfuscated original name.
_a = prepare_pegasus_inputs_dict
@require_flax
class __magic_name__(FlaxModelTesterMixin, unittest.TestCase):
    """Flax Pegasus model tests.

    BUG FIX: the base class was an undefined obfuscated name — the imported
    (and otherwise unused) ``FlaxModelTesterMixin`` is restored. Attribute,
    method, and local names are recovered from body usage (``self.model_tester``,
    ``self.config_tester``, ``self.all_model_classes``); test methods follow
    unittest's ``test_*`` convention so they are actually discovered.
    """

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # BUG FIX: the original bound the tester/config-tester to throwaway
        # names, so self.model_tester / self.config_tester were never set.
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # JIT-compiled and eager encode must produce identically shaped outputs.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # Same check for the decoder path, fed with precomputed encoder outputs.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]

        tgt_text = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 106
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__(TokenizerTesterMixin, unittest.TestCase):
    """BARThez tokenizer tests (slow and fast implementations).

    BUG FIX: the mixin base class was an undefined obfuscated name — the
    imported (and otherwise unused) ``TokenizerTesterMixin`` is restored.
    Attribute and method names are recovered from body usage
    (``self.test_rust_tokenizer``, ``self.tokenizer``) and the mixin's
    contract; the obfuscated original gave every method the same name, so
    earlier definitions (including setUp) were silently shadowed.
    """

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        # Also save in the legacy (slow) format so BarthezTokenizer can load it.
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 106
| 1
|
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


# BUG FIX: the logger was bound to a throwaway name (`A`) while the pipeline
# class bodies read `logger`; restore the conventional binding and keep the
# old name as a backward-compatible alias.
logger = logging.get_logger(__name__)
A = logger
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each
    possible label into an NLI premise/hypothesis pair.

    BUG FIX: the base class was an undefined obfuscated name — the imported
    (and otherwise unused) ``ArgumentHandler`` is restored. The class is
    renamed to match its reference as the pipeline's default ``args_parser``.
    Parameter names are restored to the names the method bodies actually read.
    """

    def _parse_labels(self, labels):
        """Accept labels as a list or a comma-separated string; return a clean list."""
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        # The template must contain a "{}" slot for the label.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        # One (premise, hypothesis) pair per (sequence, label) combination.
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase__(ChunkPipeline):
    """NLI-based zero-shot classification pipeline.

    Each candidate label is formatted into a hypothesis and scored against the
    sequence as an NLI premise; entailment probability becomes the label score.

    BUG FIX: the decorator argument and base class were undefined obfuscated
    names — the imported (and otherwise unused) ``PIPELINE_INIT_ARGS`` and
    ``ChunkPipeline`` are restored. Parameter and local names are restored to
    the names the bodies actually read (the obfuscation collapsed every
    assignment onto one shared name).
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        # BUG FIX: `labelaid` (nonexistent) restored to `label2id`, which is the
        # mapping referenced by the warning message above.
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        """Tokenize the premise/hypothesis pairs, falling back when truncation is impossible."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        # Accept candidate labels either as a keyword or as a single extra
        # positional argument (legacy calling convention).
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F"""Unable to understand extra arguments {args}""")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 475
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowercase__(unittest.TestCase, ToolTesterMixin):
    """Integration tests for the text-to-speech tool.

    BUG FIX: the mixin base was an undefined obfuscated name — the imported
    (and otherwise unused) ``ToolTesterMixin`` is restored. The three methods
    originally shared one name, so only the last definition survived and
    ``self.tool`` was never bound; they are restored as ``setUp`` plus two
    discoverable ``test_*`` methods.
    """

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Seed so the vocoder output is deterministic.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]),
            )
        )
| 475
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
    """Test-suite for the DeBERTa tokenizer (slow and fast variants).

    NOTE(review): identifier names in this excerpt were mechanically mangled —
    the three class attributes below share one name (only the last assignment
    survives), and many locals are read back under names that are never bound
    (e.g. `_UpperCAmelCase`, `tokenizer`, `tokd`). Comments describe the
    intended behaviour; restore the original names before running.
    """

    UpperCAmelCase_ = DebertaTokenizer
    UpperCAmelCase_ = True
    UpperCAmelCase_ = DebertaTokenizerFast

    def A_ ( self : int ) -> Union[str, Any]:
        """Write a tiny BPE vocab + merges fixture into the temp test dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE__ : Optional[int] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        # token -> id mapping for the toy vocabulary
        SCREAMING_SNAKE_CASE__ : List[Any] = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
        SCREAMING_SNAKE_CASE__ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = {"unk_token": "[UNK]"}
        SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        SCREAMING_SNAKE_CASE__ : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
        # Persist the fixture so from_pretrained(tmpdirname) can load it.
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
        with open(self.merges_file, "w", encoding="utf-8" ) as fp:
            fp.write("\n".join(_UpperCAmelCase ) )

    def A_ ( self : Union[str, Any], **_UpperCAmelCase : str ) -> List[str]:
        """Instantiate a slow tokenizer from the on-disk fixture."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )

    def A_ ( self : Optional[int], _UpperCAmelCase : Optional[int] ) -> int:
        """Return (input_text, expected_output_text) used by round-trip checks."""
        SCREAMING_SNAKE_CASE__ : Any = "lower newer"
        SCREAMING_SNAKE_CASE__ : List[str] = "lower newer"
        return input_text, output_text

    def A_ ( self : Optional[Any] ) -> Union[str, Any]:
        """Tokenize a simple string and check both the BPE pieces and ids."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = "lower newer"
        # expected BPE pieces for "lower newer" under the toy merges
        SCREAMING_SNAKE_CASE__ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : List[str] = tokens + [tokenizer.unk_token]
        SCREAMING_SNAKE_CASE__ : int = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase )

    def A_ ( self : Union[str, Any] ) -> List[Any]:
        """token_type_ids should mark the two segments of a pair input."""
        SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ : Dict = tokenizer("Hello", "World" )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], _UpperCAmelCase )

    @slow
    def A_ ( self : Optional[int] ) -> Any:
        """build_inputs_with_special_tokens must agree with encode(add_special_tokens=True)."""
        SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode("sequence builders", add_special_tokens=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode("multi-sequence build", add_special_tokens=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
            "sequence builders", add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase, _UpperCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Integration check against pinned ids/masks for microsoft/deberta-base."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            SCREAMING_SNAKE_CASE__ : Any = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase )
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = [tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) for seq in encoding["input_ids"]]
            # fmt: off
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
                "input_ids": [
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            SCREAMING_SNAKE_CASE__ : Optional[int] = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data, _UpperCAmelCase )
            # Decoding each encoded sequence should reproduce the input text.
            for expected, decoded in zip(_UpperCAmelCase, _UpperCAmelCase ):
                self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
| 157
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (__lowerCamelCase ):
    """Composite processor pairing an auto image-processor with an auto tokenizer
    (Donut-style: it also converts generated `<s_key>...</s_key>` token strings
    back into JSON).

    NOTE(review): identifier names in this excerpt were mechanically mangled —
    several parameters share one name (a SyntaxError as written) and many
    locals are read back under names that are never bound. Comments describe
    the intended behaviour; restore the original names before running.
    """

    UpperCAmelCase_ = ["image_processor", "tokenizer"]
    UpperCAmelCase_ = "AutoImageProcessor"
    UpperCAmelCase_ = "AutoTokenizer"

    def __init__( self : List[Any], _UpperCAmelCase : Any=None, _UpperCAmelCase : int=None, **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
        """Accept the deprecated `feature_extractor` kwarg as a fallback for
        `image_processor`, then validate that both components are present."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", _UpperCAmelCase, )
            SCREAMING_SNAKE_CASE__ : int = kwargs.pop("feature_extractor" )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(_UpperCAmelCase, _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
        SCREAMING_SNAKE_CASE__ : List[Any] = False  # `_in_target_context_manager` flag

    def __call__( self : Optional[Any], *_UpperCAmelCase : Any, **_UpperCAmelCase : str ) -> Optional[int]:
        """Route images to the image processor and text to the tokenizer;
        when both are given, attach the token ids as `labels` on the inputs."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("images", _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : str = kwargs.pop("text", _UpperCAmelCase )
        # Positional args: first is images, the rest forwarded on.
        if len(_UpperCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__ : str = args[0]
            SCREAMING_SNAKE_CASE__ : Optional[Any] = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            SCREAMING_SNAKE_CASE__ : Any = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
        if text is not None:
            SCREAMING_SNAKE_CASE__ : int = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both modalities: attach tokenized text to the pixel inputs.
            SCREAMING_SNAKE_CASE__ : int = encodings["input_ids"]
            return inputs

    def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Dict ) -> Any:
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )

    def A_ ( self : List[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : Tuple ) -> Dict:
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )

    @contextmanager
    def A_ ( self : Tuple ) -> List[str]:
        """Deprecated `as_target_processor` context: temporarily makes the
        tokenizer the current processor, restoring the image processor after."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        SCREAMING_SNAKE_CASE__ : Optional[int] = True
        SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor
        SCREAMING_SNAKE_CASE__ : List[Any] = False

    def A_ ( self : str, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any=False, _UpperCAmelCase : Union[str, Any]=None ) -> Any:
        """Convert a generated `<s_key>value</s_key>` token string into JSON
        (Donut's `token2json`): nested tags recurse, leaves split on `<sep/>`.

        # NOTE(review): the recursive calls below use `self.tokenajson`, a name
        # this mangled class never defines — restore the original method name.
        """
        if added_vocab is None:
            SCREAMING_SNAKE_CASE__ : int = self.tokenizer.get_added_vocab()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
        while tokens:
            # Find the next opening tag and its matching closing tag.
            SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE )
            if start_token is None:
                break
            SCREAMING_SNAKE_CASE__ : List[Any] = start_token.group(1 )
            SCREAMING_SNAKE_CASE__ : str = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE )
            SCREAMING_SNAKE_CASE__ : List[str] = start_token.group()
            if end_token is None:
                # Unbalanced tag: drop it and keep scanning.
                SCREAMING_SNAKE_CASE__ : Optional[int] = tokens.replace(_UpperCAmelCase, "" )
            else:
                SCREAMING_SNAKE_CASE__ : int = end_token.group()
                SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.escape(_UpperCAmelCase )
                SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase )
                SCREAMING_SNAKE_CASE__ : List[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE )
                if content is not None:
                    SCREAMING_SNAKE_CASE__ : Union[str, Any] = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
                        if value:
                            if len(_UpperCAmelCase ) == 1:
                                SCREAMING_SNAKE_CASE__ : List[str] = value[0]
                            SCREAMING_SNAKE_CASE__ : Optional[int] = value
                    else:  # leaf nodes
                        SCREAMING_SNAKE_CASE__ : Tuple = []
                        for leaf in content.split(r"<sep/>" ):
                            SCREAMING_SNAKE_CASE__ : str = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                SCREAMING_SNAKE_CASE__ : List[Any] = leaf[1:-2]  # for categorical special tokens
                            output[key].append(_UpperCAmelCase )
                        if len(output[key] ) == 1:
                            SCREAMING_SNAKE_CASE__ : int = output[key][0]
                # Consume everything up to and including the closing tag.
                SCREAMING_SNAKE_CASE__ : int = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
        if len(_UpperCAmelCase ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def A_ ( self : Optional[Any] ) -> Union[str, Any]:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
        return self.image_processor_class

    @property
    def A_ ( self : Any ) -> int:
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
        return self.image_processor
| 157
| 1
|
'''simple docstring'''
from string import ascii_uppercase
a : str = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
__snake_case = ""
__snake_case = 0
__snake_case = 0
while div != 1:
__snake_case , __snake_case = divmod(_UpperCAmelCase , _UpperCAmelCase )
if base >= 11 and 9 < mod < 36:
__snake_case = ALPHABET_VALUES[str(_UpperCAmelCase )]
else:
__snake_case = str(_UpperCAmelCase )
new_value += actual_value
__snake_case = num // base
__snake_case = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_UpperCAmelCase )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 69
|
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int = 1_00_00_00 ) -> int:
__snake_case = 1
__snake_case = 1
__snake_case = {1: 1}
for inputa in range(2 , _UpperCAmelCase ):
__snake_case = 0
__snake_case = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__snake_case = (3 * number) + 1
counter += 1
if inputa not in counters:
__snake_case = counter
if counter > pre_counter:
__snake_case = inputa
__snake_case = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 69
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import scaffolding: the heavy torch / flax implementations are only
# imported when first accessed through the `_LazyModule` installed below.
# NOTE(review): name-mangling collapsed `_import_structure` and its per-backend
# sub-lists onto one name (`__lowerCamelCase`), so each assignment below
# overwrites the previous one and the final `_LazyModule(...)` call references
# `_import_structure`, which is never bound — restore the original names.
__lowerCamelCase = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
    # torch-backed model is optional
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ['''SpeechEncoderDecoderModel''']
try:
    # flax-backed model is optional
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    __lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 667
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667
| 1
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class UpperCAmelCase ( _snake_case ):
    """XLNet model configuration: vocabulary/architecture sizes, attention-span
    behaviour, and sequence-summary (head) hyper-parameters.

    NOTE(review): identifier names in this excerpt were mechanically mangled —
    the three class attributes below share one name (only the last survives),
    every `__init__` parameter shares one name (a SyntaxError as written), and
    the locals are assigned to throw-away names instead of `self.*`. Comments
    describe the intended behaviour; restore the original names before use.
    """

    UpperCAmelCase = "xlnet"
    UpperCAmelCase = ["mems"]
    UpperCAmelCase = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : List[str] , __lowerCamelCase : str=3_2_0_0_0 , __lowerCamelCase : Tuple=1_0_2_4 , __lowerCamelCase : Tuple=2_4 , __lowerCamelCase : Optional[int]=1_6 , __lowerCamelCase : int=4_0_9_6 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple="bi" , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Optional[Any]=1e-12 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Tuple=5_1_2 , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=False , __lowerCamelCase : int=False , __lowerCamelCase : List[Any]=-1 , __lowerCamelCase : int=False , __lowerCamelCase : Any="last" , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[Any]="tanh" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Any=5 , __lowerCamelCase : str=5 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Dict=2 , **__lowerCamelCase : Tuple , ):
        # Core transformer dimensions.
        UpperCAmelCase__ :List[Any] = vocab_size
        UpperCAmelCase__ :List[Any] = d_model
        UpperCAmelCase__ :Any = n_layer
        UpperCAmelCase__ :Optional[int] = n_head
        # The per-head width must divide the model width exactly.
        if d_model % n_head != 0:
            raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        UpperCAmelCase__ :int = d_model // n_head
        UpperCAmelCase__ :List[str] = ff_activation
        UpperCAmelCase__ :Union[str, Any] = d_inner
        UpperCAmelCase__ :Dict = untie_r
        UpperCAmelCase__ :List[Any] = attn_type
        # Initialisation / normalisation / regularisation.
        UpperCAmelCase__ :List[Any] = initializer_range
        UpperCAmelCase__ :str = layer_norm_eps
        UpperCAmelCase__ :int = dropout
        # Memory / attention-span behaviour.
        UpperCAmelCase__ :Tuple = mem_len
        UpperCAmelCase__ :Optional[Any] = reuse_len
        UpperCAmelCase__ :Union[str, Any] = bi_data
        UpperCAmelCase__ :List[str] = clamp_len
        UpperCAmelCase__ :Tuple = same_length
        # Sequence-summary (classification head) settings.
        UpperCAmelCase__ :str = summary_type
        UpperCAmelCase__ :Optional[Any] = summary_use_proj
        UpperCAmelCase__ :Union[str, Any] = summary_activation
        UpperCAmelCase__ :Union[str, Any] = summary_last_dropout
        UpperCAmelCase__ :int = start_n_top
        UpperCAmelCase__ :Optional[Any] = end_n_top
        UpperCAmelCase__ :List[Any] = bos_token_id
        UpperCAmelCase__ :str = pad_token_id
        UpperCAmelCase__ :Tuple = eos_token_id
        # Deprecated `use_cache` kwarg maps onto `use_mems_eval`.
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' , __lowerCamelCase , )
            UpperCAmelCase__ :Dict = kwargs['''use_cache''']
        UpperCAmelCase__ :int = use_mems_eval
        UpperCAmelCase__ :List[str] = use_mems_train
        super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )

    @property
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        # XLNet imposes no fixed maximum sequence length.
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    # NOTE(review): the getter above lost its original name
    # (`max_position_embeddings`), so this decorator references an undefined
    # name and would raise NameError at class-creation time.
    @max_position_embeddings.setter
    def __SCREAMING_SNAKE_CASE ( self : str , __lowerCamelCase : Optional[Any] ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 467
|
'''simple docstring'''
def a__ ( UpperCamelCase_ : int | float | str ):
try:
UpperCAmelCase__ :Union[str, Any] = float(UpperCamelCase_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
UpperCAmelCase__ :List[str] = decimal - int(UpperCamelCase_ )
if fractional_part == 0:
return int(UpperCamelCase_ ), 1
else:
UpperCAmelCase__ :List[Any] = len(str(UpperCamelCase_ ).split('''.''' )[1] )
UpperCAmelCase__ :Tuple = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase__ :int = 10**number_of_frac_digits
UpperCAmelCase__ , UpperCAmelCase__ :List[str] = denominator, numerator
while True:
UpperCAmelCase__ :Optional[Any] = dividend % divisor
if remainder == 0:
break
UpperCAmelCase__ , UpperCAmelCase__ :List[str] = divisor, remainder
UpperCAmelCase__ , UpperCAmelCase__ :Tuple = numerator / divisor, denominator / divisor
return int(UpperCamelCase_ ), int(UpperCamelCase_ )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 467
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """GPU integration tests for StableDiffusionKDiffusionPipeline, comparing a
    3x3 corner patch of the generated image to pinned reference values.

    NOTE(review): local names were mangled — results are bound to one name but
    read back under others (`sd_pipe`, `prompt`, `image`, ...); restore the
    original names before running.
    """

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """SD 1.4 + `sample_euler` scheduler against pinned slice values."""
        UpperCamelCase__ : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        UpperCamelCase__ : Optional[int] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
        sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        sd_pipe.set_scheduler('''sample_euler''' )
        UpperCamelCase__ : List[str] = '''A painting of a squirrel eating a burger'''
        UpperCamelCase__ : Tuple = torch.manual_seed(0 )  # deterministic generator
        UpperCamelCase__ : Any = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' )
        UpperCamelCase__ : Optional[int] = output.images
        UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch, last channel
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        UpperCamelCase__ : List[Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """SD 2.1-base + `sample_euler` with a looser (5e-1) tolerance."""
        UpperCamelCase__ : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCamelCase__ : Tuple = sd_pipe.to(__SCREAMING_SNAKE_CASE )
        sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        sd_pipe.set_scheduler('''sample_euler''' )
        UpperCamelCase__ : Optional[Any] = '''A painting of a squirrel eating a burger'''
        UpperCamelCase__ : Optional[Any] = torch.manual_seed(0 )
        UpperCamelCase__ : Any = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' )
        UpperCamelCase__ : List[str] = output.images
        UpperCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        UpperCamelCase__ : Dict = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """SD 2.1-base + `sample_dpmpp_2m` with Karras sigmas enabled."""
        UpperCamelCase__ : int = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCamelCase__ : Union[str, Any] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
        sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        UpperCamelCase__ : List[str] = '''A painting of a squirrel eating a burger'''
        UpperCamelCase__ : Tuple = torch.manual_seed(0 )
        UpperCamelCase__ : Any = sd_pipe(
            [prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='''np''' , use_karras_sigmas=__SCREAMING_SNAKE_CASE , )
        UpperCamelCase__ : Tuple = output.images
        UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        UpperCamelCase__ : Dict = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 462
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _lowerCamelCase ( UpperCamelCase_ ):
    """Configuration for the multilingual-CLIP text tower: stores the
    transformer hidden size and the target image-embedding size used by the
    linear projection head.
    """

    SCREAMING_SNAKE_CASE_ = '''M-CLIP'''  # model_type identifier

    def __init__( self , __SCREAMING_SNAKE_CASE=1_0_2_4 , __SCREAMING_SNAKE_CASE=7_6_8 , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
        # NOTE(review): both positional parameters carry one mangled name (a
        # SyntaxError as written); the intended signature was presumably
        # (transformerDimSize=1024, imageDimSize=768), and the locals below
        # should be `self.transformerDimensions` / `self.numDims`.
        UpperCamelCase__ : Dict = transformerDimSize
        UpperCamelCase__ : Any = imageDimSize
        super().__init__(**__SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( UpperCamelCase_ ):
    """Multilingual-CLIP text encoder: an XLM-RoBERTa backbone followed by a
    linear projection into the image-embedding space.

    NOTE(review): parameter names in this excerpt were mangled (duplicates are
    a SyntaxError as written) and locals are read back under names that are
    never bound (e.g. `embs`); restore the original names before use.
    """

    SCREAMING_SNAKE_CASE_ = MCLIPConfig  # config_class

    def __init__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Build the XLM-R backbone and the projection layer from the config."""
        super().__init__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Union[str, Any] = XLMRobertaModel(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Tuple = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
        """Mean-pool the token embeddings under the attention mask, then
        project; returns (projected_embedding, pooled_embedding)."""
        UpperCamelCase__ : Tuple = self.transformer(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
        # Masked mean pooling: zero out padding, sum, divide by token counts.
        UpperCamelCase__ : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(__SCREAMING_SNAKE_CASE ), embs
| 462
| 1
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return x + 2
class _snake_case ( unittest.TestCase):
    """Unit tests for `transformers.tools.python_interpreter.evaluate`:
    `evaluate(code, tools, state=...)` runs a restricted Python snippet,
    mutates `state` in place, and returns the value of the last assignment.

    NOTE(review): local names in this excerpt were mangled — every local is
    bound to `lowercase__` but read back as `a__` / `result`, which are never
    bound (NameError as written); restore the original names before running.
    """

    def A__ ( self : Union[str, Any] ):
        """Plain assignment, then assignment from a variable in `state`."""
        lowercase__ = "x = 3"
        lowercase__ = {}
        lowercase__ = evaluate(a__, {}, state=a__ )
        assert result == 3
        self.assertDictEqual(a__, {"x": 3} )
        lowercase__ = "x = y"
        lowercase__ = {"y": 5}
        lowercase__ = evaluate(a__, {}, state=a__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(a__, {"x": 5, "y": 5} )

    def A__ ( self : str ):
        """Calling a tool works only when it is passed in the tools dict."""
        lowercase__ = "y = add_two(x)"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {"add_two": add_two}, state=a__ )
        assert result == 5
        self.assertDictEqual(a__, {"x": 3, "y": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            lowercase__ = evaluate(a__, {}, state=a__ )
        assert result is None
        assert "tried to execute add_two" in out.out

    def A__ ( self : Any ):
        """Single statement: result and state both reflect the assignment."""
        lowercase__ = "x = 3"
        lowercase__ = {}
        lowercase__ = evaluate(a__, {}, state=a__ )
        assert result == 3
        self.assertDictEqual(a__, {"x": 3} )

    def A__ ( self : Dict ):
        """Dict literal construction with tool calls inside the values."""
        lowercase__ = "test_dict = {'x': x, 'y': add_two(x)}"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {"add_two": add_two}, state=a__ )
        self.assertDictEqual(a__, {"x": 3, "y": 5} )
        self.assertDictEqual(a__, {"x": 3, "test_dict": {"x": 3, "y": 5}} )

    def A__ ( self : Dict ):
        """Multiple statements: the last assignment's value is returned."""
        lowercase__ = "x = 3\ny = 5"
        lowercase__ = {}
        lowercase__ = evaluate(a__, {}, state=a__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(a__, {"x": 3, "y": 5} )

    def A__ ( self : str ):
        """f-string interpolation against values held in `state`."""
        lowercase__ = "text = f'This is x: {x}.'"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {}, state=a__ )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(a__, {"x": 3, "text": "This is x: 3."} )

    def A__ ( self : Union[str, Any] ):
        """if/else branches picked according to the current state."""
        lowercase__ = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {}, state=a__ )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(a__, {"x": 3, "y": 2} )
        lowercase__ = {"x": 8}
        lowercase__ = evaluate(a__, {}, state=a__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(a__, {"x": 8, "y": 5} )

    def A__ ( self : List[Any] ):
        """List literal construction with a tool call inside."""
        lowercase__ = "test_list = [x, add_two(x)]"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {"add_two": add_two}, state=a__ )
        self.assertListEqual(a__, [3, 5] )
        self.assertDictEqual(a__, {"x": 3, "test_list": [3, 5]} )

    def A__ ( self : Any ):
        """Variable-to-variable assignment."""
        lowercase__ = "y = x"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {}, state=a__ )
        assert result == 3
        self.assertDictEqual(a__, {"x": 3, "y": 3} )

    def A__ ( self : Tuple ):
        """Subscripting lists and dicts built in earlier statements."""
        lowercase__ = "test_list = [x, add_two(x)]\ntest_list[1]"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {"add_two": add_two}, state=a__ )
        assert result == 5
        self.assertDictEqual(a__, {"x": 3, "test_list": [3, 5]} )
        lowercase__ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        lowercase__ = {"x": 3}
        lowercase__ = evaluate(a__, {"add_two": add_two}, state=a__ )
        assert result == 5
        self.assertDictEqual(a__, {"x": 3, "test_dict": {"x": 3, "y": 5}} )

    def A__ ( self : List[Any] ):
        """`for` loop: the loop variable and final value land in state."""
        lowercase__ = "x = 0\nfor i in range(3):\n    x = i"
        lowercase__ = {}
        lowercase__ = evaluate(a__, {"range": range}, state=a__ )
        assert result == 2
        self.assertDictEqual(a__, {"x": 2, "i": 2} )
| 413
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
class lowerCamelCase__ ( __lowerCamelCase ):
    """Deprecated shim: `SageMakerTrainer` simply warns and delegates to `Trainer`."""

    def __init__( self : Optional[int] ,a__ : List[str]=None ,**a__ : List[str] ):
        # NOTE(review): both parameters carry one mangled name `a__` (a
        # SyntaxError as written); the original signature was presumably
        # (self, args=None, **kwargs).
        # Emit the deprecation warning, then delegate construction to Trainer.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." ,a__ ,)
        super().__init__(args=a__ ,**a__ )
| 331
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = KandinskyVaaInpaintPipeline
__lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__lowerCamelCase = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__lowerCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase = False
@property
def __a ( self ) -> Dict:
    # Hidden size for the dummy text/image embedder used by the test pipeline.
    # NOTE(review): all properties here were mangled to `__a`; siblings
    # reference attributes such as `text_embedder_hidden_size` that this class
    # no longer defines — restore the original property names.
    return 32
@property
def __a ( self ) -> Optional[Any]:
    # Dummy time-embedding input size (presumably `time_input_dim` before
    # name-mangling — confirm against the original test file).
    return 32
@property
def __a ( self ) -> Union[str, Any]:
    # Base block width, tied to the time-embedding size.
    # NOTE(review): `self.time_input_dim` is not defined under that name in
    # this mangled excerpt (the property above lost its name).
    return self.time_input_dim
@property
def __a ( self ) -> Optional[Any]:
    # Time-embedding dimension: four times the time-input size.
    # NOTE(review): `self.time_input_dim` is not defined under that name here.
    return self.time_input_dim * 4
@property
def __a ( self ) -> List[Any]:
    # Cross-attention width used by the dummy UNet config below
    # (its 'cross_attention_dim' entry reads this value via a sibling name).
    return 100
@property
def __a ( self ) -> Any:
    """Build a tiny, deterministically-seeded UNet2DConditionModel for the
    inpainting tests (note the 9 input channels)."""
    torch.manual_seed(0 )  # deterministic weight init
    snake_case__ : Dict = {
        'in_channels': 9,
        # Out channels is double in channels because predicts mean and variance
        'out_channels': 8,
        'addition_embed_type': 'image',
        'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
        'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
        'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
        'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
        'layers_per_block': 1,
        'encoder_hid_dim': self.text_embedder_hidden_size,
        'encoder_hid_dim_type': 'image_proj',
        'cross_attention_dim': self.cross_attention_dim,
        'attention_head_dim': 4,
        'resnet_time_scale_shift': 'scale_shift',
        'class_embed_type': None,
    }
    # NOTE(review): `__UpperCamelCase` and `model` are unbound under the
    # mangled names (the kwargs dict and model were bound to `snake_case__`);
    # `self.block_out_channels_a` etc. reference mangled sibling properties.
    snake_case__ : Dict = UNetaDConditionModel(**__UpperCamelCase )
    return model
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = self.dummy_unet
snake_case__ : Dict = self.dummy_movq
snake_case__ : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__UpperCamelCase , )
snake_case__ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCamelCase )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : List[str] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((256, 256) )
# create mask
snake_case__ : str = np.ones((64, 64) , dtype=np.floataa )
snake_case__ : str = 0
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : Any = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : List[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : Any = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu'
snake_case__ : Optional[int] = self.get_dummy_components()
snake_case__ : int = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[str] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
snake_case__ : int = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
snake_case__ : Tuple = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __a ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase):
    """End-to-end GPU test against the published Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): masked index reconstructed from the upstream test — confirm.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 713
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Route all transformers logging through an explicit INFO-level default handler.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    """Load CSV split files and turn them into tokenized ``tf.data`` datasets.

    Args:
        train_file / eval_file / test_file: optional paths to CSV files.
        tokenizer: a ``PreTrainedTokenizer`` used to encode the text column(s).
        label_column_id: index of the label column among the CSV features.
        max_seq_length: padding/truncation length for ``batch_encode_plus``.

    Returns:
        ``(train_ds, val_ds, test_ds, label2id)`` where missing splits are ``None``.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Pop the label column; the remaining feature(s) are the text input(s).
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair classification.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# `main()` below logs through this name; the obfuscated assignment left it unbound.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Fine-tune / evaluate a TF sequence-classification model on CSV data."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Simple accuracy over the argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
    return results


if __name__ == "__main__":
    main()
| 699
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: heavyweight submodules are only imported on first
# attribute access via _LazyModule.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch; only register it when torch is installed.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 472
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of *number* as a newline-joined string.

    >>> print(multiplication_table(5, 2))
    5 * 1 = 5
    5 * 2 = 10
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 472
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Mapping from checkpoint name to its hosted config file.
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    """Configuration for the LiLT (Language-independent Layout Transformer) model.

    Defaults reproduce the `SCUT-DLVCLab/lilt-roberta-en-base` architecture.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Ratio by which the layout (bbox) channel is shrunk relative to the text channel.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 702
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy-import structure; modeling needs torch, the feature extractor needs speech extras.
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191
| 0
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds tiny RoBERTa configs/inputs for the Flax model test mixin."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random input ids/masks plus a matching tiny RobertaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model-test suite against all Flax RoBERTa heads."""

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The mixin reads `self.model_tester` to build configs/inputs.
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 354
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it; these names are read by the demo in __main__.
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify *point* by majority vote among its *k* nearest training points.

    Args:
        train_data: iterable of training points.
        train_target: label index for each training point.
        classes: sequence mapping a label index to a class name.
        point: the sample to classify.
        k: number of neighbours to vote (default 5).

    Returns:
        The class name (from *classes*) chosen by the k nearest neighbours.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 354
| 1
|
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __lowerCamelCase ( __snake_case : Dataset, __snake_case : Dict[str, str] ) -> Any:
"""simple docstring"""
A__ : List[str] =args.log_outputs
A__ : Any ="""_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
A__ : int =load_metric("""wer""" )
A__ : Any =load_metric("""cer""" )
# compute metrics
A__ : Union[str, Any] =wer.compute(references=result["""target"""], predictions=result["""prediction"""] )
A__ : int =cer.compute(references=result["""target"""], predictions=result["""prediction"""] )
# print & log results
A__ : Union[str, Any] =f"WER: {wer_result}\nCER: {cer_result}"
print(__snake_case )
with open(f"{dataset_id}_eval_results.txt", """w""" ) as f:
f.write(__snake_case )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ : List[str] =f"log_{dataset_id}_predictions.txt"
A__ : List[Any] =f"log_{dataset_id}_targets.txt"
with open(__snake_case, """w""" ) as p, open(__snake_case, """w""" ) as t:
# mapping function to write output
def write_to_file(__snake_case : Optional[Any], __snake_case : Dict ):
p.write(f"{i}" + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f"{i}" + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(__snake_case, with_indices=__snake_case )
def __lowerCamelCase ( __snake_case : str ) -> str:
"""simple docstring"""
A__ : int ="""[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ : Any =re.sub(__snake_case, """""", text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A__ : Dict =["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
A__ : List[str] =""" """.join(text.split(__snake_case ) )
return text
def __lowerCamelCase ( __snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A__ : Union[str, Any] =load_dataset(args.dataset, args.config, split=args.split, use_auth_token=__snake_case )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ : str =AutoFeatureExtractor.from_pretrained(args.model_id )
A__ : List[Any] =feature_extractor.sampling_rate
# resample audio
A__ : Optional[int] =dataset.cast_column("""audio""", Audio(sampling_rate=__snake_case ) )
# load eval pipeline
if args.device is None:
A__ : List[str] =0 if torch.cuda.is_available() else -1
A__ : Tuple =pipeline("""automatic-speech-recognition""", model=args.model_id, device=args.device )
# map function to decode audio
def map_to_pred(__snake_case : int ):
A__ : Optional[Any] =asr(
batch["""audio"""]["""array"""], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s )
A__ : Optional[Any] =prediction["""text"""]
A__ : str =normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
A__ : Optional[int] =dataset.map(__snake_case, remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__snake_case, __snake_case )
if __name__ == "__main__":
    # Build the CLI, parse it, and hand off to main().
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    args = parser.parse_args()

    main(args)
| 687
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure; the fast tokenizer needs `tokenizers`, modeling needs torch.
_import_structure = {
    'configuration_squeezebert': [
        'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SqueezeBertConfig',
        'SqueezeBertOnnxConfig',
    ],
    'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_squeezebert'] = [
        'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SqueezeBertForMaskedLM',
        'SqueezeBertForMultipleChoice',
        'SqueezeBertForQuestionAnswering',
        'SqueezeBertForSequenceClassification',
        'SqueezeBertForTokenClassification',
        'SqueezeBertModel',
        'SqueezeBertModule',
        'SqueezeBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE(review): the original class name was lost in obfuscation; the diffusers
# object gated on torch+torchsde is DPMSolverSDEScheduler — confirm upstream.
class DPMSolverSDEScheduler(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError when the real backends are missing."""

    # DummyObject reads `_backends` to build the error message.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 574
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Any ):
A__ = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
A__ = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
A__ = F"""{src_lang}-{tgt_lang}"""
A__ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
A__ = os.path.join(UpperCamelCase , """README.md""" )
print(F"""Generating {path}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(UpperCamelCase )
# make sure we are under the root of the project
# NOTE: the original collapsed every binding to `lowerCamelCase__`, leaving
# `repo_dir`, `model_cards_dir`, `src_lang`, `tgt_lang` and `model_card_dir`
# unbound and calling an undefined `write_model_card`; restored here to match
# the generator function `_SCREAMING_SNAKE_CASE` defined above.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # e.g. "wmt19-ru-en" -> ("wmt19", "ru", "en")
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    _SCREAMING_SNAKE_CASE(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 574
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece-based) tokenizer is optional; fall back to None so
# the fast tokenizer can still be used without the `sentencepiece` backend.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    a = None

# NOTE(review): every constant below is bound to the same name `a`, yet the
# class further down reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and `logger` — the original names
# were presumably those; confirm against the upstream module.
a = logging.get_logger(__name__)

# Expected filenames for the slow vocab and the fast tokenizer serialization.
a = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub download URLs per pretrained checkpoint.
a = {
    '''vocab_file''': {
        '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
        ),
    },
    '''tokenizer_file''': {
        '''google/bigbird-roberta-base''': (
            '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
        ),
        '''google/bigbird-roberta-large''': (
            '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
        ),
        '''google/bigbird-base-trivia-itc''': (
            '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence length supported by each checkpoint.
a = {
    '''google/bigbird-roberta-base''': 4_096,
    '''google/bigbird-roberta-large''': 4_096,
    '''google/bigbird-base-trivia-itc''': 4_096,
}

# SentencePiece sub-word marker character.
a = '''▁'''
class lowercase_ ( PreTrainedTokenizerFast ):
    """Fast BigBird tokenizer backed by HuggingFace's *tokenizers* library.

    Restored from a garbled definition: the `__init__` repeated one parameter
    name nine times (a SyntaxError) while its body referenced `bos_token`,
    `eos_token`, etc.; the six class attributes all shared one name; and the
    four methods all shared one name, shadowing each other. Names below follow
    the contract expected by `PreTrainedTokenizerFast`.
    """

    # Base-class configuration hooks (filenames, hub maps, slow counterpart).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string special tokens so they are treated atomically.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocab requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a1: Optional[List[int]] = None):
        """Add [CLS]/[SEP] framing: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a1 is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a1 + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a1 is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a1: Optional[List[int]] = None):
        """Segment ids: 0 for the first sequence (with its framing), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a1 is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece vocab file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 505
|
"""simple docstring"""
class Things:
    """An item with a name, a value and a weight, used by the greedy knapsack
    helpers below (which construct `Things` and call `get_value`/`get_weight`).

    Restored from a garbled definition whose `__init__` repeated one parameter
    name (a SyntaxError), stored into locals instead of attributes, and whose
    five accessors all shared a single method name.
    """

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density: value per unit of weight.
        return self.value / self.weight
def _snake_case ( name , value , weight ):
    """Build a list of `Things` from parallel name/value/weight sequences.

    The original definition repeated the parameter name three times (a
    SyntaxError); the body indexes all three sequences in lockstep.
    """
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple ) -> Any:
'''simple docstring'''
_A = sorted(_snake_case , key=_snake_case , reverse=_snake_case )
_A = []
_A , _A = 0.0, 0.0
for i in range(len(_snake_case ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _snake_case ( ) -> Optional[int]:
    """Placeholder entry point; its body is empty apart from this docstring."""
    # NOTE(review): presumably a demo/main routine was lost here — confirm
    # against the original source.


if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 505
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
# Type alias for a path through the grid: a list of (y, x) coordinates.
# The original bound all three constants to the same name `A`, while the BFS
# classes below read `grid` and `delta` — restored accordingly.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node holding a grid position, the goal position and a
    back-pointer to its parent (used for path retracing).

    Restored from a garbled definition: the class was named `__lowercase`
    while the code below constructs `Node(...)`, the `__init__` repeated one
    parameter name five times (a SyntaxError), and attributes were stored
    into locals instead of `self`.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Position as (y, x), matching how the grid is indexed.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level `grid`, from `start` to `goal`
    (both given as (y, x) tuples).

    Restored from a garbled definition: the class was named `__lowercase`
    while the code below instantiates `BreadthFirstSearch`, methods shared a
    single name, and `__init__` had duplicated parameter names.
    """

    def __init__(self, start, goal):
        # Node takes (pos_x, pos_y, goal_x, goal_y, parent); tuples are (y, x).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the path to the target, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return in-bounds, unblocked neighbours of *parent* (per `delta`)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node):
        """Follow parent pointers from *node* back to the root, then reverse."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Bidirectional BFS: one frontier expands from the start, one from the
    goal; the search stops when the two frontier heads meet.

    Restored from a garbled definition (class named `__lowercase` while the
    script below instantiates `BidirectionalBreadthFirstSearch`; locals and
    attribute targets collapsed to a single name).
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Advance both frontiers in lockstep until their heads coincide."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # Retarget each direction at the other's current head so successor
            # nodes carry an up-to-date goal.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (dropping the
        duplicated meeting node from the backward half)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    # The original collapsed every binding to `A`, leaving `init`, `goal`,
    # `bfs`, `bfs_time`, etc. unbound; names restored to match their uses.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('''Unidirectional BFS computation time : ''', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 52
|
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
if not isinstance(snake_case__, snake_case__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(snake_case__, snake_case__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__UpperCAmelCase : Any = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(snake_case__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 382
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Shap-E sub-package init: the real pipelines need both `transformers` and
# `torch`; when either is missing, fall back to the dummy placeholder object
# that raises an informative error on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Full imports: camera helpers, both pipelines and the renderer stack.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 637
|
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search sorted *a_list* for *item*; return True if found.

    Restored from a garbled definition that duplicated the parameter name
    (a SyntaxError) and was named `_snake_case` while both the recursive
    calls and the __main__ block below invoke `binary_search`.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        # Search the left half (midpoint excluded).
        return binary_search(a_list[:midpoint], item)
    else:
        # Search the right half (midpoint excluded).
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    # The original collapsed every binding to one name, leaving `sequence`,
    # `target` and `not_str` unbound; names restored to match their uses.
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    sequence = [int(item.strip()) for item in user_input.split(''',''')]
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    not_str = '''''' if binary_search(sequence, target) else '''not '''
    print(f"""{target} was {not_str}found in {sequence}""")
| 637
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger: the rest of this script logs through the name `logger`
# (see the setLevel/warning/info calls in main), so bind it under that name
# rather than the garbled `SCREAMING_SNAKE_CASE__`.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class __lowerCAmelCase :
    """Arguments describing which dataset to pre-train on and how to split it."""

    # NOTE(review): every field below is bound to the same name `_UpperCamelCase`
    # and `A_` is not defined in this file — the original dataclass presumably
    # had distinct field names (dataset_name, dataset_config_name, image_column_name,
    # train_dir, validation_dir, train_val_split, max_train_samples,
    # max_eval_samples) with `None` defaults; confirm against the upstream script.
    _UpperCamelCase : Tuple = field(
        default="""cifar10""" ,metadata={"""help""": """Name of a dataset from the datasets package"""} )
    _UpperCamelCase : Tuple = field(
        default=A_ ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    _UpperCamelCase : Optional[Any] = field(
        default=A_ ,metadata={"""help""": """The column name of the images in the files."""} )
    _UpperCamelCase : str = field(default=A_ ,metadata={"""help""": """A folder containing the training data."""} )
    _UpperCamelCase : List[str] = field(default=A_ ,metadata={"""help""": """A folder containing the validation data."""} )
    _UpperCamelCase : Tuple = field(
        default=0.15 ,metadata={"""help""": """Percent to split off of train for validation."""} )
    _UpperCamelCase : Union[str, Any] = field(
        default=A_ ,metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } ,)
    _UpperCamelCase : List[Any] = field(
        default=A_ ,metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } ,)

    # NOTE(review): this was presumably `__post_init__` originally (it builds
    # the `data_files` mapping consumed by load_dataset in main); confirm.
    def _snake_case ( self ) -> Any:
        """Collect train/validation directories into a data_files mapping."""
        a__ : Any = {}
        if self.train_dir is not None:
            a__ : int = self.train_dir
        if self.validation_dir is not None:
            a__ : Tuple = self.validation_dir
        a__ : Optional[int] = data_files if data_files else None
@dataclass
class __lowerCAmelCase :
    """Arguments describing which ViTMAE model/config/image-processor to load."""

    # NOTE(review): every field below is bound to the same name `_UpperCamelCase`
    # and `A_` is undefined — the original presumably had distinct field names
    # (model_name_or_path, config_name, config_overrides, cache_dir,
    # model_revision, image_processor_name, use_auth_token, mask_ratio,
    # norm_pix_loss) with `None` defaults; confirm against the upstream script.
    _UpperCamelCase : Tuple = field(
        default=A_ ,metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch."""
            )
        } ,)
    _UpperCamelCase : Optional[Any] = field(
        default=A_ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
    _UpperCamelCase : Dict = field(
        default=A_ ,metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } ,)
    _UpperCamelCase : Dict = field(
        default=A_ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    _UpperCamelCase : List[Any] = field(
        default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
    _UpperCamelCase : Optional[Any] = field(default=A_ ,metadata={"""help""": """Name or path of preprocessor config."""} )
    _UpperCamelCase : int = field(
        default=A_ ,metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } ,)
    _UpperCamelCase : int = field(
        default=0.75 ,metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
    _UpperCamelCase : Dict = field(
        default=A_ ,metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class __lowerCAmelCase ( A_ ):
    """TrainingArguments extension adding a base learning rate that main()
    scales by total_batch_size / 256."""

    # NOTE(review): base class `A_` is undefined here — presumably
    # `TrainingArguments`, and the field was `base_learning_rate`; confirm.
    _UpperCamelCase : List[str] = field(
        default=1E-3 ,metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def _A ( lowerCamelCase ):
a__ : int = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def _A ( ):
    """End-to-end ViT-MAE pre-training entry point: parse arguments, set up
    logging, load and split the dataset, build config/image-processor/model,
    apply MAE-style image transforms, then train and/or evaluate via Trainer.

    NOTE(review): throughout this body the original local bindings were
    collapsed to `a__`, while later lines read the real names (`parser`,
    `model_args`, `data_args`, `training_args`, `ds`, `config`,
    `image_processor`, `model`, `column_names`, `transforms`, `trainer`, ...)
    — as written these are unbound (NameError at runtime); restore against
    the upstream example script before running. `training_args.fpaa` also
    looks like a garbling of `fp16` — confirm.
    """
    a__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        a__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        a__ : Dict = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae" , __snake_case , __snake_case )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    a__ : Union[str, Any] = training_args.get_process_log_level()
    logger.setLevel(__snake_case )
    transformers.utils.logging.set_verbosity(__snake_case )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    a__ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        a__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Initialize our dataset.
    a__ : Optional[Any] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    a__ : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
        a__ : List[str] = ds["""train"""].train_test_split(data_args.train_val_split )
        a__ : Union[str, Any] = split["""train"""]
        a__ : Optional[int] = split["""test"""]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a__ : str = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        a__ : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
    elif model_args.model_name_or_path:
        a__ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
    else:
        a__ : Optional[Any] = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        } )

    # create image processor
    if model_args.image_processor_name:
        a__ : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
    elif model_args.model_name_or_path:
        a__ : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
    else:
        a__ : Union[str, Any] = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        a__ : List[Any] = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        a__ : Union[str, Any] = ViTMAEForPreTraining(__snake_case )

    if training_args.do_train:
        a__ : List[Any] = ds["""train"""].column_names
    else:
        a__ : Union[str, Any] = ds["""validation"""].column_names

    # Resolve which dataset column holds the images.
    if data_args.image_column_name is not None:
        a__ : str = data_args.image_column_name
    elif "image" in column_names:
        a__ : Optional[Any] = """image"""
    elif "img" in column_names:
        a__ : List[Any] = """img"""
    else:
        a__ : str = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        a__ : Dict = image_processor.size["""shortest_edge"""]
    else:
        a__ : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
    a__ : Tuple = Compose(
        [
            Lambda(lambda lowerCamelCase : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    # Batch-level transform applied lazily by `set_transform` below.
    def preprocess_images(lowerCamelCase ):
        a__ : Dict = [transforms(__snake_case ) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            a__ : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(__snake_case )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            a__ : Union[str, Any] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(__snake_case )

    # Compute absolute learning rate
    a__ : Optional[Any] = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        a__ : Tuple = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    a__ : Optional[Any] = Trainer(
        model=__snake_case , args=__snake_case , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )

    # Training
    if training_args.do_train:
        a__ : Any = None
        if training_args.resume_from_checkpoint is not None:
            a__ : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            a__ : Union[str, Any] = last_checkpoint
        a__ : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        a__ : int = trainer.evaluate()
        trainer.log_metrics("eval" , __snake_case )
        trainer.save_metrics("eval" , __snake_case )

    # Write model card and (optionally) push to hub
    a__ : Optional[Any] = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__snake_case )
    else:
        trainer.create_model_card(**__snake_case )
# NOTE(review): this shadows the collate function above, which is also named
# `_A` — in the upstream script this secondary entry point has a distinct
# name (the xla_spawn `_mp_fn`); confirm.
def _A ( lowerCamelCase ):
    """Entry point that ignores its argument and runs `main`."""
    main()


if __name__ == "__main__":
    main()
| 112
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger for this (duplicated) MAE pre-training example.
# NOTE(review): bound to `UpperCAmelCase` here — the original name was
# presumably `logger`; confirm against the upstream script.
UpperCAmelCase = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class lowercase__ :
    """Arguments describing which dataset to pre-train on and how to split it
    (duplicate of the dataclass earlier in this dump)."""

    # NOTE(review): all fields share the name `__UpperCAmelCase` and `A_` is
    # undefined — the original presumably used distinct field names with
    # `None` defaults; confirm against the upstream example script.
    __UpperCAmelCase = field(
        default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    __UpperCAmelCase = field(
        default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase = field(
        default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} )
    __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} )
    __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} )
    __UpperCAmelCase = field(
        default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} )
    __UpperCAmelCase = field(
        default=A_ ,metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } ,)
    __UpperCAmelCase = field(
        default=A_ ,metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } ,)

    # NOTE(review): presumably `__post_init__` originally (builds the
    # `data_files` mapping consumed by load_dataset); confirm.
    def UpperCamelCase_ ( self) -> Any:
        """Collect train/validation directories into a data_files mapping."""
        _lowerCamelCase : Any = {}
        if self.train_dir is not None:
            _lowerCamelCase : int = self.train_dir
        if self.validation_dir is not None:
            _lowerCamelCase : Tuple = self.validation_dir
        _lowerCamelCase : Optional[int] = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train.

    NOTE(review): the original block used the mangled field name
    ``__UpperCAmelCase`` for every attribute and the undefined default ``A_``;
    canonical names/defaults are restored (``main()`` reads
    ``model_args.cache_dir``, ``model_args.mask_ratio`` etc.).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


# Backward-compatible alias for the previous (broken) class name.
lowercase__ = ModelArguments
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments extended with a base learning rate.

    NOTE(review): the original base class was the undefined name ``A_``;
    ``main()`` reads ``training_args.base_learning_rate`` alongside the
    standard ``TrainingArguments`` fields, so the base is ``TrainingArguments``
    (imported at the top of this file).
    """

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


# Backward-compatible alias for the previous (broken) class name.
lowercase__ = CustomTrainingArguments
def _snake_case ( __snake_case : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def _snake_case ( ):
    """Entry point for the ViTMAE pretraining example: parses arguments, sets up
    logging, loads the dataset and model, applies the MAE image transforms, and
    runs training / evaluation via the Trainer.

    NOTE(review): throughout this function results are bound to the name
    `_lowerCamelCase` while later statements read names that are never assigned
    here (`parser`, `training_args`, `model_args`, `data_args`, `ds`, `split`,
    `config`, `image_processor`, `column_names`, `trainer`, `train_result`,
    ...). The assignment targets appear to have been mechanically renamed;
    restore the original target names before running this script.
    """
    _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""" , __snake_case , __snake_case )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level()
    logger.setLevel(__snake_case )
    transformers.utils.logging.set_verbosity(__snake_case )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    _lowerCamelCase : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    _lowerCamelCase : Optional[Any] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    _lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
        _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split )
        _lowerCamelCase : Union[str, Any] = split["""train"""]
        _lowerCamelCase : Optional[int] = split["""test"""]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _lowerCamelCase : str = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
    elif model_args.model_name_or_path:
        _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
    else:
        _lowerCamelCase : Optional[Any] = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'New config: {config}' )
    # adapt config
    config.update(
        {
            """mask_ratio""": model_args.mask_ratio,
            """norm_pix_loss""": model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
    elif model_args.model_name_or_path:
        _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
    else:
        _lowerCamelCase : Union[str, Any] = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case )
    if training_args.do_train:
        _lowerCamelCase : List[Any] = ds["""train"""].column_names
    else:
        _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        _lowerCamelCase : str = data_args.image_column_name
    elif "image" in column_names:
        _lowerCamelCase : Optional[Any] = """image"""
    elif "img" in column_names:
        _lowerCamelCase : List[Any] = """img"""
    else:
        _lowerCamelCase : str = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""]
    else:
        _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
    _lowerCamelCase : Tuple = Compose(
        [
            Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # NOTE(review): the inner helper below reads the undefined names `img`,
    # `transforms` and `examples` — further fallout of the renaming noted above.
    def preprocess_images(__snake_case : Optional[Any] ):
        _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(__snake_case )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            _lowerCamelCase : Union[str, Any] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(__snake_case )
    # Compute absolute learning rate
    _lowerCamelCase : Optional[Any] = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    _lowerCamelCase : Optional[Any] = Trainer(
        model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
    # Training
    if training_args.do_train:
        _lowerCamelCase : Any = None
        if training_args.resume_from_checkpoint is not None:
            _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            _lowerCamelCase : Union[str, Any] = last_checkpoint
        _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        _lowerCamelCase : int = trainer.evaluate()
        trainer.log_metrics("""eval""" , __snake_case )
        trainer.save_metrics("""eval""" , __snake_case )
    # Write model card and (optionally) push to hub
    _lowerCamelCase : Optional[Any] = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__snake_case )
    else:
        trainer.create_model_card(**__snake_case )
def _snake_case ( __snake_case : Dict ):
    """TPU multiprocess entry point; the process index argument is unused.

    NOTE(review): this calls `main()`, which is not defined in this file (the
    real entry point above was renamed to `_snake_case`), so running this
    script raises NameError as-is.
    """
    main()


if __name__ == "__main__":
    main()
| 88
| 0
|
import re
import string
import numpy as np
import datasets
__lowerCamelCase : Optional[int] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__lowerCamelCase : Tuple = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n 
>>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__lowerCamelCase : str = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Exact-match metric: rate (0–100) at which predictions equal references.

    NOTE(review): the original block defined both methods under one name (the
    second clobbered the first), used duplicate parameter names (a
    SyntaxError), and referenced the undefined name ``_lowercase``; the
    canonical ``_info``/``_compute`` hooks expected by ``datasets.Metric`` are
    restored below.
    """

    def _info(self):
        """Describe the metric: inputs are parallel string sequences."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": percentage of exact string matches}.

        Optional normalizations (regex removal, case folding, punctuation and
        digit stripping) are applied to both predictions and references before
        the element-wise comparison.
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from each string before comparing.
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern, "", x) for x in predictions])
                references = np.array([re.sub(pattern, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}

    # Backward-compatible alias for the previous (broken) method name.
    SCREAMING_SNAKE_CASE_ = _compute
| 707
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# NOTE(review): restored the canonical lazy-import layout — the structure dict
# must be named `_import_structure` (it is passed to `_LazyModule` below), the
# torch models must actually be registered under "modeling_roc_bert", and the
# lazy module must be installed into `sys.modules`. The original bound the dict
# and the model list to the same throwaway name and never defined
# `_import_structure`, so importing this module raised NameError.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # RoCBert ships no fast tokenizer module; nothing extra to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# NOTE(review): restored the canonical lazy-import layout. The original bound
# the structure dict and each backend's class list to the same throwaway name
# (so the dict was clobbered instead of extended) and then passed the undefined
# name `_import_structure` to `_LazyModule`, raising NameError on import.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 180
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test for the TF mT5 model.

    NOTE(review): in the original block every local was bound to the same name
    and the model/tokenizer calls read the undefined name ``lowercase__``;
    distinct local names are restored below.
    """

    @slow
    def __lowerCamelCase ( self ):
        """Check that google/mt5-small reproduces a known LM loss on a tiny input."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        # Scores must agree with the reference value to ~2e-4.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 421
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowercase(metaclass=DummyObject):
    """Placeholder raising a helpful error when the `onnx` backend is missing.

    NOTE(review): the original used the undefined metaclass ``__lowerCamelCase``
    (should be ``DummyObject`` from ``..utils``), misnamed the backend list
    attribute (``DummyObject`` machinery reads ``_backends``), and gave both
    classmethods the same mangled name so the second clobbered the first.
    """

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 703
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# Module-level logger for the JSON loader.
lowerCAmelCase_ : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader.

    NOTE(review): the original block assigned every option to the single name
    ``lowerCamelCase_`` (each line clobbering the previous), while the builder
    methods read ``self.config.features``, ``encoding``, ``encoding_errors``,
    ``field``, ``use_threads``, ``block_size``, ``chunksize`` and
    ``newlines_in_values``; those canonical attribute names are restored below
    with the original defaults, in the original order.
    """

    features: Optional[datasets.Features] = None  # optional schema to cast tables to
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None  # codec error handler; None -> "strict"
    field: Optional[str] = None  # read only this top-level key of a single JSON object
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None  # no longer supported; must stay None
class lowercase ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that reads JSON / JSON-Lines files into Arrow tables.

    NOTE(review): almost every local variable in this class has been renamed to
    the single name `lowercase_`, so successive assignments clobber each other
    and many later reads (`data_files`, `files`, `splits`, `pa_table`, `keys`,
    `dataset`, `batch_idx`, `block_size`, `batch`, `mapping`, ...) refer to
    names that are never bound here. Restore the original local names before
    relying on this code; the structure and comments below describe the
    intended flow.
    """

    lowerCamelCase_ =JsonConfig
    def __UpperCAmelCase ( self : int) -> List[str]:
        # Validate deprecated/unsupported config options, then describe the dataset.
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            lowercase_ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def __UpperCAmelCase ( self : Any , __lowerCAmelCase : int) -> Any:
        # Download/extract the configured data files and emit one SplitGenerator
        # per split (or a single TRAIN split for a bare list of files).
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
        lowercase_ = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__lowerCAmelCase , (str, list, tuple)):
            lowercase_ = data_files
            if isinstance(__lowerCAmelCase , __lowerCAmelCase):
                lowercase_ = [files]
            lowercase_ = [dl_manager.iter_files(__lowerCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        lowercase_ = []
        for split_name, files in data_files.items():
            if isinstance(__lowerCAmelCase , __lowerCAmelCase):
                lowercase_ = [files]
            lowercase_ = [dl_manager.iter_files(__lowerCAmelCase) for file in files]
            splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={"files": files}))
        return splits
    def __UpperCAmelCase ( self : Any , __lowerCAmelCase : pa.Table) -> pa.Table:
        # Align a parsed table with the configured schema: add missing columns
        # as all-null, then cast (which also reorders nested keys).
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                lowercase_ = self.config.features.arrow_schema.field(__lowerCAmelCase).type
                lowercase_ = pa_table.append_column(__lowerCAmelCase , pa.array([None] * len(__lowerCAmelCase) , type=__lowerCAmelCase))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowercase_ = table_cast(__lowerCAmelCase , self.config.features.arrow_schema)
        return pa_table
    def __UpperCAmelCase ( self : List[Any] , __lowerCAmelCase : List[Any]) -> List[Any]:
        # Yield (key, Arrow table) pairs: either the contents of one JSON field,
        # or chunked JSON-Lines parsing with pyarrow (falling back to json.load).
        for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    lowercase_ = json.load(__lowerCAmelCase)
                # We keep only the field we are interested in
                lowercase_ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(__lowerCAmelCase , (list, tuple)):
                    lowercase_ = set().union(*[row.keys() for row in dataset])
                    lowercase_ = {col: [row.get(__lowerCAmelCase) for row in dataset] for col in keys}
                else:
                    lowercase_ = dataset
                lowercase_ = pa.Table.from_pydict(__lowerCAmelCase)
                yield file_idx, self._cast_table(__lowerCAmelCase)
            # If the file has one json object per line
            else:
                with open(__lowerCAmelCase , "rb") as f:
                    lowercase_ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowercase_ = max(self.config.chunksize // 32 , 16 << 10)
                    lowercase_ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        lowercase_ = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(__lowerCAmelCase)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowercase_ = batch.decode(self.config.encoding , errors=__lowerCAmelCase).encode("utf-8")
                        try:
                            while True:
                                try:
                                    lowercase_ = paj.read_json(
                                        io.BytesIO(__lowerCAmelCase) , read_options=paj.ReadOptions(block_size=__lowerCAmelCase))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(__lowerCAmelCase , pa.ArrowInvalid)
                                        and "straddling" not in str(__lowerCAmelCase)
                                        or block_size > len(__lowerCAmelCase)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'Batch of {len(__lowerCAmelCase)} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.')
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    __lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    lowercase_ = json.load(__lowerCAmelCase)
                            except json.JSONDecodeError:
                                logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase)}: {e}')
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(__lowerCAmelCase , __lowerCAmelCase):  # list is the only sequence type supported in JSON
                                try:
                                    lowercase_ = set().union(*[row.keys() for row in dataset])
                                    lowercase_ = {col: [row.get(__lowerCAmelCase) for row in dataset] for col in keys}
                                    lowercase_ = pa.Table.from_pydict(__lowerCAmelCase)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase)}: {e}')
                                    raise ValueError(F'Not able to read records in the JSON file at {file}.') from None
                                yield file_idx, self._cast_table(__lowerCAmelCase)
                                break
                            else:
                                logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase)}: {e}')
                                raise ValueError(
                                    F'Not able to read records in the JSON file at {file}. '
                                    F'You should probably indicate the field of the JSON file containing your records. '
                                    F'This JSON file contain the following fields: {str(list(dataset.keys()))}. '
                                    F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ') from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase)
                        batch_idx += 1
| 0
|
"""simple docstring"""
def solution() -> int:
    """Project Euler 9: product a*b*c of the Pythagorean triplet with a + b + c == 1000.

    Iterates candidate legs with a <= b, derives c = 1000 - a - b, and returns
    the product of the (unique) triplet satisfying a**2 + b**2 == c**2.

    The original inner range started at the undefined name ``__UpperCAmelCase``
    (a NameError); it should start at ``a`` so each unordered pair is tried once.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if a * a + b * b == (1000 - a - b) ** 2
    ][0]


# Backward-compatible alias for the previous public name (the __main__ guard
# below called `solution()`, which did not exist before this fix).
lowercase_ = solution

if __name__ == "__main__":
    print(f"""{solution() = }""")
| 299
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
# Module-level logger for the deprecated CLIP feature-extractor shim.
_A = logging.get_logger(__name__)
class _lowerCamelCase(CLIPImageProcessor):
    """Deprecated alias of :class:`CLIPImageProcessor` kept for backward compatibility.

    NOTE(review): the original base class was the undefined name ``a_`` (the
    only image-processing class imported above is ``CLIPImageProcessor``), the
    ``*``/``**`` parameters shared one name (a SyntaxError), and the warning
    category was lost.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning, then defer to the new class.
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 299
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the TrOCR configuration file.
lowercase : Optional[int] = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config URL.
lowercase : Union[str, Any] = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Defaults match the ``microsoft/trocr-base`` checkpoints.

    NOTE(review): the original base class was the undefined name ``a__``
    (``PretrainedConfig`` is the only config base imported above), the class
    attributes were all named ``A`` (clobbering each other, while the
    ``PretrainedConfig`` machinery needs ``model_type`` /
    ``keys_to_ignore_at_inference`` / ``attribute_map``), every ``__init__``
    parameter shared the name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), and
    ``super().__init__`` was passed the undefined name ``_A``. The canonical
    parameter names are restored below with the original default values, in the
    original order.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,              # size of the decoder vocabulary
        d_model=1024,                  # decoder hidden size
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,         # if True, embeddings are scaled by sqrt(d_model)
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : str = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """
    Configuration for a Donut Swin vision encoder.

    NOTE(review): the original block did not compile — every ``__init__``
    parameter shared the placeholder name ``_SCREAMING_SNAKE_CASE`` (a
    SyntaxError) and the two class attributes shadowed each other under the
    name ``A``.  Names below are reconstructed from the attribute
    assignments in the body.
    """

    model_type = 'donut-swin'
    # Map the generic attribute names used by the base config onto the
    # Swin-specific attribute names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # list defaults kept for interface compatibility; treated as read-only
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        """Store the Swin encoder hyper-parameters."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 114
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( lowercase__ ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ):
    """Selecting the 'spark' joblib backend works; unknown backend names raise.

    Bug fix: this zero-argument function referenced the undefined name
    ``lowercase__`` both as the expected exception type and as the arguments
    to ``map_nested``.  An unsupported backend name raises ``ValueError``.
    """
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    _lowerCamelCase : Tuple = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            # the mapped function is never called — the backend selection raises first
            map_nested(lambda x: x + 1 , _lowerCamelCase , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(lambda x: x + 1 , _lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def _snake_case ( lowercase__ ):
    # Intended check: `map_nested` run under the 'spark' joblib backend returns
    # the same nested structure as sequential mapping, for lists, flat dicts,
    # dicts of lists, nested dicts and larger dicts, with num_proc in {2, -1}.
    #
    # NOTE(review): this body looks mangled by an automated rename — every
    # local below rebinds the single name `_lowerCamelCase` (only the last
    # binding survives), the expected-value name `expected_map_nested_sa` is
    # never defined, and `lowercase__` (the parametrized num_proc) is passed
    # to map_nested as the function, the data, *and* num_proc.  Restore
    # distinct names (function, data, expected, num_proc) before relying on
    # this test.
    _lowerCamelCase : Any = [1, 2]
    _lowerCamelCase : int = {'a': 1, 'b': 2}
    _lowerCamelCase : int = {'a': [1, 2], 'b': [3, 4]}
    _lowerCamelCase : int = {'a': {'1': 1}, 'b': 2}
    _lowerCamelCase : List[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    _lowerCamelCase : Union[str, Any] = [2, 3]
    _lowerCamelCase : List[Any] = {'a': 2, 'b': 3}
    _lowerCamelCase : Any = {'a': [2, 3], 'b': [4, 5]}
    _lowerCamelCase : Any = {'a': {'1': 2}, 'b': 3}
    _lowerCamelCase : List[str] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
        assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
        assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
        assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
        assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
| 630
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase__ ( ModelMixin, ConfigMixin, ModuleUtilsMixin ):
    """
    GPT-2 based text decoder that conditions generation on a "prefix"
    embedding (e.g. a CLIP image feature), used for caption generation.

    NOTE(review): the original block did not compile — every parameter was
    renamed to the same placeholder `lowercase` (SyntaxError: duplicate
    argument), all four methods shared the name `A_`, several tuple
    unpackings carried annotations (`a, b : T = ...` is a SyntaxError), and
    the dummy-token dtype was the non-existent `torch.intaa`.  Names below
    are reconstructed from the body's call sites and attribute assignments;
    the base classes come from this file's imports.
    """

    # GPT-2 attention bias buffers that may legitimately be absent/extra when
    # loading a checkpoint (was a single garbled class attribute).
    _keys_to_ignore_on_load_unexpected = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        # A projection is mandatory whenever the prefix features do not
        # already match the GPT-2 embedding width.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck: project prefix features in, and back out to the
        # GPT-2 embedding size; identity when no hidden dim is configured.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config )

    def forward(self , input_ids , prefix_embeds , attention_mask=None , labels=None ):
        """Run the decoder on token embeddings preceded by the projected prefix.

        Returns ``(transformer_output, hidden)`` when a prefix bottleneck is
        configured, otherwise just the transformer output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            # Prepend zero placeholder labels for the prefix positions —
            # TODO(review): confirm how the loss treats these positions.
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self , batch_size , device ):
        """Zero token ids, one per prefix position (placeholder labels)."""
        # Bug fix: dtype was `torch.intaa`, which does not exist.
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )

    def encode(self , prefix ):
        """Project prefix features into the bottleneck space (identity if none)."""
        return self.encode_prefix(prefix )

    @torch.no_grad()
    def generate_captions(self , features , eos_token_id , device ):
        """Beam-search one caption per feature row; returns (tokens, lengths)."""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only beam search is supported for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        """Length-normalised beam search over the GPT-2 decoder.

        Returns ``(tokens, seq_lengths)`` with beams sorted by descending
        score; token rows are padded to the generated maximum length.
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )

        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Finished beams must not accumulate extra probability mass
                # (reconstructed from two garbled assignments in the original).
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                # Length-normalise before picking the global top-k expansions.
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 630
| 1
|
"""simple docstring"""
def UpperCamelCase_ ( lowerCamelCase : str ) -> bool:
    """Return True if every bracket in the input string is properly matched.

    Non-bracket characters are ignored.  Bug fix: the original body indexed
    an undefined name ``s`` instead of the parameter, and its annotations
    (``Optional[int] -> Dict``) did not match the string-in/bool-out contract.
    """
    stack = []
    open_brackets = {'''(''', '''[''', '''{'''}
    closed_brackets = {''')''', ''']''', '''}'''}
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}

    for char in lowerCamelCase:
        if char in open_brackets:
            stack.append(char )
        elif char in closed_brackets and (
            # closing with nothing open, or closing the wrong opener
            not stack or open_to_closed[stack.pop()] != char
        ):
            return False
    # Balanced iff no opener is left waiting for its partner.
    return not stack
def UpperCamelCase_ ( ) -> None:
    """Prompt for a bracket sequence and report whether it is balanced."""
    __magic_name__ : Any = input('''Enter sequence of brackets: ''' )
    # NOTE(review): `is_balanced` is not defined anywhere in this file — the
    # checker above was renamed to `UpperCamelCase_`, which this very def now
    # shadows — so this call raises NameError at runtime.  Confirm the
    # intended helper name before use.
    if is_balanced(__magic_name__ ):
        print(__magic_name__ , '''is balanced''' )
    else:
        print(__magic_name__ , '''is not balanced''' )


if __name__ == "__main__":
    # NOTE(review): `main` is also undefined here; presumably this should call
    # the prompt function defined above.
    main()
| 719
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
    """
    Runs the doctests embedded in the transformers source tree and the docs.

    NOTE(review): the original block did not compile — `analyze_directory`
    declared five parameters all named ``snake_case`` (a SyntaxError) and all
    five test methods shared the name ``_UpperCAmelCase`` (so only the last
    would have survived).  Parameter and method names below are reconstructed
    from each body's call sites and local values.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Doctest every file of *directory* that passes the name filters.

        identifier:   keep only files whose name contains this substring.
        n_identifier: drop files containing this substring (or any of them,
                      when a list is given).
        ignore_files: extra file names to skip (besides ``__init__.py``).
        only_modules: if True, resolve each file to a ``transformers``
                      attribute and run its DocTestSuite; otherwise run
                      ``doctest.testfile`` on the file itself.
        """
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('''__init__.py''' )
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('''Testing''' , file )
            if only_modules:
                module_identifier = file.split('''.''' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    # Not every source file maps to an importable attribute.
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )

    def test_modeling(self ) -> None:
        """Doctest the modeling files, skipping the known-problematic CTRL ones."""
        directory = Path('''src/transformers''' )
        identifier = '''modeling'''
        ignore_files = [
            '''modeling_ctrl.py''',
            '''modeling_tf_ctrl.py''',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization(self ) -> None:
        """Doctest the tokenization files."""
        directory = Path('''src/transformers''' )
        identifier = '''tokenization'''
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration(self ) -> None:
        """Doctest the configuration files."""
        directory = Path('''src/transformers''' )
        identifier = '''configuration'''
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_files(self ) -> None:
        """Doctest everything that is not a configuration/modeling/tokenization file."""
        directory = Path('''src/transformers''' )
        n_identifiers = ['''configuration''', '''modeling''', '''tokenization''']
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_documentation(self ) -> None:
        """Run the documentation sources themselves through doctest.testfile."""
        directory = Path('''docs/source''' )
        ignore_files = ['''favicon.ico''']
        # Docs files are not importable modules, hence only_modules=False
        # (was a garbled undefined name in the original).
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 147
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ = abspath(join(dirname(__file__), '''src'''))
# Bug fix: the original inserted `git_repo_path`, an undefined name — the
# intended value is the `src` path computed on the previous line.
sys.path.insert(1, lowerCAmelCase_)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    """Forward pytest's option parser to the shared transformers helper."""
    # Function-local import preserved from the original, so importing this
    # module alone does not require transformers.
    from transformers.testing_utils import pytest_addoption_shared as _shared_addoption

    _shared_addoption(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (terminalreporter ):
    """Emit the extra test reports at the end of the run when ``--make-reports``
    was passed (the ``pytest_terminal_summary`` hook body).

    Bug fix: the original's parameter was a placeholder name while the body
    read the undefined name ``terminalreporter``, and it passed the reporter
    object itself as ``id`` instead of the ``--make-reports`` value.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    snake_case_ = terminalreporter.config.getoption('''--make-reports''' )
    if snake_case_:
        # `id` is the report-name prefix supplied on the command line.
        pytest_terminal_summary_main(terminalreporter , id=snake_case_ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
snake_case_ = 0
# Doctest custom flag to ignore output.
lowerCAmelCase_ = doctest.register_optionflag('''IGNORE_RESULT''')
lowerCAmelCase_ = doctest.OutputChecker
class snake_case_ ( doctest.OutputChecker ):
    """doctest output checker that accepts any output when the custom
    ``IGNORE_RESULT`` option flag (registered above) is set on an example.

    NOTE(review): the original did not compile — it inherited the undefined
    name ``__A``, referenced an undefined ``OutputChecker``, and declared
    three identically-named parameters (a SyntaxError).  Reconstructed as the
    standard ``check_output`` override, which is the method name doctest
    actually invokes.
    """

    def check_output(self , want , got , optionflags ):
        # Skip the comparison entirely when IGNORE_RESULT is requested.
        if IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self , want , got , optionflags )
lowerCAmelCase_ = CustomOutputChecker
lowerCAmelCase_ = HfDoctestModule
lowerCAmelCase_ = HfDocTestParser
| 39
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase__ = 1_2_8_0_2_2
lowercase__ = 1_2_8_0_2_8
@require_sentencepiece
class snake_case__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = MaMaaaTokenizer
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
snake_case : int = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[int] = '''</s>'''
snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Tuple = self.get_tokenizer()
snake_case : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
snake_case : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''This is a test''' )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Tuple = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = """facebook/m2m100_418M"""
lowerCamelCase = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
lowerCamelCase = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
lowerCamelCase = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def lowerCAmelCase ( cls : List[Any] ) -> int:
"""simple docstring"""
snake_case : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
snake_case : List[str] = 1
return cls
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
snake_case : List[str] = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCamelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Dict = '''en'''
snake_case : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case : str = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
snake_case : int = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
snake_case : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Union[str, Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCamelCase__ )
snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(UpperCamelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__ )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = '''en'''
snake_case : int = '''fr'''
snake_case : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' )
snake_case : List[str] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case : Tuple = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : List[str] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case : Optional[int] = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case : List[Any] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case : List[str] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
snake_case : int = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[12_8022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_8006,
} , )
| 638
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A__ ( __lowerCAmelCase : int ) -> int:  # picklable for multiprocessing
    """Return the argument incremented by one.

    Kept at module top level so it can be pickled and shipped to worker
    processes.  Bug fix: the original returned the undefined name ``i``
    instead of its parameter.
    """
    return __lowerCAmelCase + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A__ ( ):
    """Selecting the 'spark' joblib backend works; unknown backend names raise.

    Bug fix: this zero-argument function referenced the undefined name
    ``__lowerCAmelCase`` both as the expected exception type and as the
    arguments to ``map_nested``.  An unsupported backend name raises
    ``ValueError``.
    """
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lowerCamelCase__ = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            # the mapped function is never called — backend selection raises first
            map_nested(lambda x: x + 1 , lowerCamelCase__ , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(lambda x: x + 1 , lowerCamelCase__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def A__ ( __lowerCAmelCase : int ):
    # Intended check: `map_nested` run under the 'spark' joblib backend returns
    # the same nested structure as sequential mapping, for lists, flat dicts,
    # dicts of lists, nested dicts and larger dicts, with num_proc in {2, -1}.
    #
    # NOTE(review): this body looks mangled by an automated rename — every
    # local below rebinds the single name `lowerCamelCase__` (only the last
    # binding survives), the expected-value name `expected_map_nested_sa` is
    # never defined, and `__lowerCAmelCase` (the parametrized num_proc) is
    # passed to map_nested as the function, the data, *and* num_proc.
    # Restore distinct names before relying on this test.
    lowerCamelCase__ = [1, 2]
    lowerCamelCase__ = {"""a""": 1, """b""": 2}
    lowerCamelCase__ = {"""a""": [1, 2], """b""": [3, 4]}
    lowerCamelCase__ = {"""a""": {"""1""": 1}, """b""": 2}
    lowerCamelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    lowerCamelCase__ = [2, 3]
    lowerCamelCase__ = {"""a""": 2, """b""": 3}
    lowerCamelCase__ = {"""a""": [2, 3], """b""": [4, 5]}
    lowerCamelCase__ = {"""a""": {"""1""": 2}, """b""": 3}
    lowerCamelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
| 714
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ (unittest.TestCase ):
    """Settings holder for the Levit image-processor tests.

    Stores every knob the processor needs and exposes them as the kwargs dict
    used to build the processor (``UpperCamelCase_``).

    BUG FIX: the original ``__init__`` reused one obfuscated identifier for all
    thirteen parameters — a SyntaxError.  The names below follow the order of
    the attribute assignments.  Mutable list defaults are replaced by ``None``
    sentinels to avoid shared state across instances (same defaults kept).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Fall back to the Levit defaults when not overridden.
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def UpperCamelCase_ ( self ):
        """Return the settings as the image-processor constructor kwargs."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    """Tests for the Levit image processor: attribute presence, ``from_dict``
    round-tripping, and output pixel-value shapes for PIL / numpy / torch input.

    NOTE(review): the base ``a`` and the helper ``LevitImageProcessingTester``
    are not defined under those names in this file as written — presumably
    obfuscated references to ``ImageProcessingSavingTestMixin`` (imported above)
    and the tester class defined earlier; verify.  Also note every method below
    shares the name ``UpperCamelCase_``, so each definition shadows the
    previous one at class-creation time.
    """

    # class under test; None (tests skipped) when vision deps are unavailable
    _UpperCamelCase = LevitImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        # NOTE(review): result bound to a throwaway local — presumably this was
        # ``self.image_processor_tester = ...`` (setUp); the property below
        # reads that attribute.  Verify.
        lowerCamelCase__ = LevitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ):
        # kwargs dict used to instantiate the processor in each test
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        """The processor exposes every configuration attribute."""
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )

    def UpperCamelCase_ ( self ):
        """``from_dict`` honors both the stored sizes and keyword overrides."""
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        # int overrides are normalized into the dict forms
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        """Pixel-value shapes for PIL image input, unbatched and batched."""
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        """Same shape checks for numpy array input."""
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        """Same shape checks for torch tensor input."""
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
| 9
| 0
|
def UpperCamelCase ( __magic_name__ : dict ) -> int:
    """Print and return the number of vertices on the longest path in a DAG.

    The graph is an adjacency list ``{vertex: [neighbours]}`` with vertices
    numbered ``0 .. len(graph) - 1``.  Uses Kahn's topological order with a
    DP array ``long_dist`` (longest path ending at each vertex, counted in
    vertices, so an isolated vertex scores 1).

    Args:
        __magic_name__: the DAG as an adjacency list.

    Returns:
        Length (in vertices) of the longest path; 0 for an empty graph.
    """
    # robustness: max([]) would raise on an empty graph
    if not __magic_name__:
        print(0 )
        return 0
    # BUG FIX: the original read the undefined global ``graph`` instead of its
    # parameter, and appended the whole graph (not the vertex) to the queue.
    indegree = [0] * len(__magic_name__ )
    queue = []
    long_dist = [1] * len(__magic_name__ )

    # count incoming edges of every vertex
    for values in __magic_name__.values():
        for i in values:
            indegree[i] += 1

    # seed the queue with the sources (indegree 0)
    for i in range(len(__magic_name__ ) ):
        if indegree[i] == 0:
            queue.append(i )

    while queue:
        vertex = queue.pop(0 )
        for x in __magic_name__[vertex]:
            indegree[x] -= 1
            # relax: a longer path now reaches x through vertex
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )

    print(max(long_dist ) )
    return max(long_dist )
# Adjacency list of the DAG (vertices 0..7)
A : dict = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# BUG FIX: the original called the undefined name ``longest_distance`` on the
# undefined global ``graph``; the function defined above is ``UpperCamelCase``.
# The ``graph`` alias is kept because the function body (as originally written)
# read that global name.
graph = A
UpperCamelCase(graph)
| 15
|
"""simple docstring"""
import os
import sys
a_ = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a_ = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoConfig.from_pretrained``.
    # NOTE(review): all seven wrappers below share the name ``__UpperCAmelCase``;
    # each ``def`` rebinds it, so after import only the last wrapper (the
    # AutoModelForQuestionAnswering one) is reachable — presumably these had
    # distinct names (config, tokenizer, model, ...) before obfuscation; verify.
    return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoTokenizer.from_pretrained``.
    return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoModel.from_pretrained``.
    return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoModelForCausalLM.from_pretrained``.
    return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoModelForMaskedLM.from_pretrained``.
    return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoModelForSequenceClassification.from_pretrained``.
    return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
    # Thin pass-through to ``AutoModelForQuestionAnswering.from_pretrained``.
    return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
| 76
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Configuration for ImageGPT (a GPT-2-style decoder over quantized image tokens).

    BUG FIX: the original declared all three class attributes under a single
    obfuscated name (so only the last survived) and reused one name for every
    ``__init__`` parameter — a SyntaxError.  Attribute names below are those
    the ``PretrainedConfig`` machinery reads (``model_type``, ``attribute_map``);
    parameter names follow the order of the attribute assignments — verify
    against upstream ImageGPTConfig.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store the hyper-parameters; unknown kwargs go to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for ImageGPT.

    BUG FIX: both members were declared under one obfuscated name (the second
    shadowed the first) and ``generate_dummy_inputs`` reused one name for every
    parameter — a SyntaxError.  Names restored per the ``OnnxConfig`` API —
    verify against the base class.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the exported model inputs."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=32,
        image_height=32,
    ) -> Mapping[str, Any]:
        """Build dummy pixel inputs by running generated images through *preprocessor*.

        ``seq_length`` and ``is_pair`` are unused for image models but kept for
        the common ``OnnxConfig`` signature.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 3
|
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Abstract base class for beam-search decoding constraints.

    NOTE(review): all six abstract methods below are declared under the same
    (name-mangled) identifier ``__UpperCamelCase``, so each definition shadows
    the previous one at class-creation time — presumably these were distinct
    methods (advance, does_advance, update, reset, remaining, copy) before
    obfuscation; the self-check body calls those names on ``self``.  Locals
    were also collapsed to ``UpperCamelCase``, leaving ``completed``/``counter``/
    ``A_`` unbound as written; verify.
    """

    def __init__( self ) -> Optional[Any]:
        """Run the subclass self-consistency check on construction."""
        # test for the above condition
        self.test()

    def __UpperCamelCase ( self ) -> Dict:
        """Self-check: walk the constraint to completion via advance()/update(),
        resetting once at step 1; raise if it never completes or ends dirty."""
        UpperCamelCase = 0
        UpperCamelCase = False
        while not completed:
            if counter == 1:
                self.reset()
            UpperCamelCase = self.advance()
            if not self.does_advance(A_ ):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
            UpperCamelCase , UpperCamelCase , UpperCamelCase = self.update(A_ )
            counter += 1
            # safety valve against constraints that never complete
            if counter > 10_000:
                raise Exception('update() does not fulfill the constraint.' )
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.' )

    @abstractmethod
    def __UpperCamelCase ( self ) -> Optional[Any]:
        """Abstract — presumably ``advance``: token(s) that make progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def __UpperCamelCase ( self , A_ ) -> str:
        """Abstract — presumably ``does_advance(token_id)``."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def __UpperCamelCase ( self , A_ ) -> int:
        """Abstract — presumably ``update(token_id) -> (stepped, completed, reset)``."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def __UpperCamelCase ( self ) -> Any:
        """Abstract — presumably ``reset``: discard all progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def __UpperCamelCase ( self ) -> str:
        """Abstract — presumably ``remaining``: steps left to fulfilment."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def __UpperCamelCase ( self , A_=False ) -> int:
        """Abstract — presumably ``copy(stateful=False)``."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Constraint forcing one exact token sequence to be generated.

    Progress is a cursor (``fulfilled_idx``) into ``token_ids``.
    NOTE(review): method names were collapsed to ``__UpperCamelCase`` (each
    shadows the previous) and locals/attributes to ``UpperCamelCase``, leaving
    names like ``token_ids``/``token_id``/``stepped`` unbound as written; the
    intended attributes are token_ids, seqlen, fulfilled_idx, completed — verify.
    """

    def __init__( self , A_ ) -> Any:
        """Validate the token-id list and start with no progress made."""
        super(A_ , self ).__init__()
        if not isinstance(A_ , A_ ) or len(A_ ) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        UpperCamelCase = token_ids
        UpperCamelCase = len(self.token_ids )
        UpperCamelCase = -1  # the index of the currently fulfilled step
        UpperCamelCase = False

    def __UpperCamelCase ( self ) -> Optional[Any]:
        """Presumably ``advance``: the next expected token id, or None when done."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def __UpperCamelCase ( self , A_ ) -> Optional[int]:
        """Presumably ``does_advance``: does *token_id* match the next expected token?"""
        if not isinstance(A_ , A_ ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def __UpperCamelCase ( self , A_ ) -> Optional[int]:
        """Presumably ``update``: consume one token; returns (stepped, completed, reset)."""
        if not isinstance(A_ , A_ ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}''' )
        UpperCamelCase = False
        UpperCamelCase = False
        UpperCamelCase = False
        if self.does_advance(A_ ):
            self.fulfilled_idx += 1
            UpperCamelCase = True
            if self.fulfilled_idx == (self.seqlen - 1):
                UpperCamelCase = True
            UpperCamelCase = completed
        else:
            # failed to make progress.
            UpperCamelCase = True
            self.reset()
        return stepped, completed, reset

    def __UpperCamelCase ( self ) -> Dict:
        """Presumably ``reset``: discard all progress."""
        UpperCamelCase = False
        UpperCamelCase = 0

    def __UpperCamelCase ( self ) -> int:
        """Presumably ``remaining``: tokens still needed to complete the phrase."""
        return self.seqlen - (self.fulfilled_idx + 1)

    def __UpperCamelCase ( self , A_=False ) -> Union[str, Any]:
        """Presumably ``copy(stateful)``: fresh constraint, optionally carrying progress."""
        UpperCamelCase = PhrasalConstraint(self.token_ids )
        if stateful:
            UpperCamelCase = self.seqlen
            UpperCamelCase = self.fulfilled_idx
            UpperCamelCase = self.completed
        return new_constraint
class lowercase :
    """A trie over several token-id sequences, used by the disjunctive
    constraint to answer "which tokens can extend this prefix?".

    BUG FIX: the original reused one name for both ``__init__`` parameters
    (a SyntaxError) and declared every method under the same identifier, so
    ``self.has_subsets`` did not exist.  Method names are restored to the
    ones the callers later in this file use (``trie.next_tokens`` /
    ``trie.reached_leaf``).
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        """Build the trie from *nested_token_ids*.

        Raises:
            ValueError: when *no_subsets* is True and one sequence is a
                complete prefix of another (ambiguous leaves).
        """
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for token_id in token_ids:
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F''' {nested_token_ids}.''' )
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the token ids that can extend *current_seq* in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        return list(start.keys())

    def reached_leaf(self, current_seq):
        """True when *current_seq* ends at a leaf (no continuation exists)."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Number of leaf nodes below *root* (a nested dict)."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True when some sequence is a strict prefix of another: prefixed
        sequences share a path, so the leaf count drops below the list length."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Constraint satisfied by generating ANY ONE of several token sequences,
    tracked with a DisjunctiveTrie over the candidates.

    NOTE(review): method names were collapsed to ``__UpperCamelCase`` (each
    definition shadows the previous) and locals/attributes to ``UpperCamelCase``,
    so names like ``nested_token_ids``/``stepped``/``self.token_ids`` are unbound
    or never set as written; the intended attributes are trie, token_ids,
    seqlen (= trie.max_height), current_seq and completed — verify.
    """

    def __init__( self , A_ ) -> str:
        """Validate the candidate sequences and build the trie."""
        super(A_ , self ).__init__()
        if not isinstance(A_ , A_ ) or len(A_ ) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        UpperCamelCase = DisjunctiveTrie(A_ )
        UpperCamelCase = nested_token_ids
        UpperCamelCase = self.trie.max_height
        UpperCamelCase = []
        UpperCamelCase = False

    def __UpperCamelCase ( self ) -> Union[str, Any]:
        """Presumably ``advance``: token ids that can extend the current sequence."""
        UpperCamelCase = self.trie.next_tokens(self.current_seq )
        if len(A_ ) == 0:
            return None
        else:
            return token_list

    def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
        """Presumably ``does_advance``: can *token_id* extend the current sequence?"""
        if not isinstance(A_ , A_ ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
        UpperCamelCase = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
        """Presumably ``update``: consume one token; returns (stepped, completed, reset)."""
        if not isinstance(A_ , A_ ):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}''' )
        UpperCamelCase = False
        UpperCamelCase = False
        UpperCamelCase = False
        if self.does_advance(A_ ):
            self.current_seq.append(A_ )
            UpperCamelCase = True
        else:
            UpperCamelCase = True
            self.reset()
        # completed when the accumulated sequence hits a leaf of the trie
        UpperCamelCase = self.trie.reached_leaf(self.current_seq )
        UpperCamelCase = completed
        return stepped, completed, reset

    def __UpperCamelCase ( self ) -> str:
        """Presumably ``reset``: discard all progress."""
        UpperCamelCase = False
        UpperCamelCase = []

    def __UpperCamelCase ( self ) -> Optional[int]:
        """Presumably ``remaining``: worst-case tokens still needed."""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def __UpperCamelCase ( self , A_=False ) -> int:
        """Presumably ``copy(stateful)``: fresh constraint, optionally carrying progress."""
        UpperCamelCase = DisjunctiveConstraint(self.token_ids )
        if stateful:
            UpperCamelCase = self.seqlen
            UpperCamelCase = self.current_seq
            UpperCamelCase = self.completed
        return new_constraint
class lowercase :
    """Tracks one beam hypothesis' progress through a LIST of constraints:
    which are complete, which single one is in progress, which are pending.

    NOTE(review): locals/attributes were collapsed to ``UpperCamelCase`` during
    obfuscation; the intended attributes are constraints, max_seqlen,
    n_constraints, completed, complete_constraints, inprogress_constraint and
    pending_constraints (read throughout the methods below) — verify.
    """

    def __init__( self , A_ ) -> Tuple:
        """Store the constraint list plus derived bookkeeping, then reset state."""
        UpperCamelCase = constraints
        # max # of steps required to fulfill a given constraint
        UpperCamelCase = max([c.seqlen for c in constraints] )
        UpperCamelCase = len(A_ )
        UpperCamelCase = False
        self.init_state()

    def __UpperCamelCase ( self ) -> List[str]:
        """Presumably ``init_state``: nothing complete, nothing in progress,
        fresh stateless copies of every constraint pending."""
        UpperCamelCase = []
        UpperCamelCase = None
        UpperCamelCase = [constraint.copy(stateful=A_ ) for constraint in self.constraints]

    def __UpperCamelCase ( self ) -> Optional[int]:
        """Presumably ``get_bank``: progress score; completed constraints count
        full weight, the in-progress one earns partial credit."""
        UpperCamelCase = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def __UpperCamelCase ( self ) -> Optional[int]:
        """Presumably ``advance``: tokens that advance the in-progress
        constraint, or any pending one when none is in progress."""
        UpperCamelCase = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                UpperCamelCase = constraint.advance()
                # advance() may yield a single token or a list of tokens
                if isinstance(A_ , A_ ):
                    token_list.append(A_ )
                elif isinstance(A_ , A_ ):
                    token_list.extend(A_ )
        else:
            UpperCamelCase = self.inprogress_constraint.advance()
            if isinstance(A_ , A_ ):
                token_list.append(A_ )
            elif isinstance(A_ , A_ ):
                token_list.extend(A_ )
        if len(A_ ) == 0:
            return None
        else:
            return token_list

    def __UpperCamelCase ( self , A_ ) -> Any:
        """Presumably ``reset(token_ids)``: replay *token_ids* from a clean state."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                UpperCamelCase , UpperCamelCase = self.add(A_ )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def __UpperCamelCase ( self , A_ ) -> int:
        """Presumably ``add(token_id)``: feed one token to the in-progress
        constraint, or to whichever pending constraint it advances;
        returns (complete, stepped)."""
        if not isinstance(A_ , A_ ):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
        UpperCamelCase , UpperCamelCase = False, False
        if self.completed:
            UpperCamelCase = True
            UpperCamelCase = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            UpperCamelCase , UpperCamelCase , UpperCamelCase = self.inprogress_constraint.update(A_ )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
                UpperCamelCase = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                UpperCamelCase = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    UpperCamelCase = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(A_ ):
                    UpperCamelCase , UpperCamelCase , UpperCamelCase = pending_constraint.update(A_ )
                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.' )
                    if complete:
                        self.complete_constraints.append(A_ )
                        UpperCamelCase = None
                    if not complete and stepped:
                        UpperCamelCase = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        UpperCamelCase = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            UpperCamelCase = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def __UpperCamelCase ( self , A_=True ) -> Tuple:
        """Presumably ``copy(stateful=True)``: new state over the same constraint
        objects, optionally duplicating per-constraint progress."""
        UpperCamelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            UpperCamelCase = [
                constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                UpperCamelCase = self.inprogress_constraint.copy(stateful=A_ )
            UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 3
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCAmelCase__( lowerCamelCase ):
    """Configuration for SegFormer encoder/decoder models.

    BUG FIX: the original reused one obfuscated identifier for every ``__init__``
    parameter — a SyntaxError — and passed an undefined name as the
    ``warnings.warn`` category.  Parameter names below are reconstructed from
    the attribute assignments (verify against upstream SegformerConfig), and
    the ``model_type`` class-attribute name is restored so the config
    auto-mapping can find it.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=2_56,
        semantic_loss_ignore_index=2_55,
        **kwargs,
    ):
        """Store per-stage hyper-parameters; unknown kwargs go to the base config."""
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,  # category restored; the original passed an undefined name
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # deprecated flag; default True matches the upstream behaviour — verify
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase__( lowerCamelCase ):
    """ONNX export configuration for SegFormer.

    NOTE(review): the three members below all share one obfuscated name
    (``UpperCAmelCase``), so only the last property survives at class-creation
    time — presumably they were ``inputs`` / ``atol_for_validation`` /
    ``default_onnx_opset``; verify against the OnnxConfig base class.
    """

    # minimum torch/ONNX version the export is declared to work with
    A : Dict = version.parse("1.11" )

    @property
    def UpperCAmelCase ( self : Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes of the exported model inputs (NCHW pixel values)."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def UpperCAmelCase ( self : str) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4

    @property
    def UpperCAmelCase ( self : int) -> int:
        """Default ONNX opset used for the export."""
        return 12
| 622
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCAmelCase ( ):
    """Patch ``os.path.join`` inside ``_test_patching`` and verify every access
    path (module attr chains, renamed aliases, the bare name) sees the mock
    while sibling attributes stay untouched, then that everything is restored.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    # BUG FIX: the original bound the mock to a shadowed obfuscated name and
    # then passed the undefined name ``A__`` to patch_submodule.
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def _lowerCAmelCase ( ):
    """``open`` is a builtin present in ``_test_patching``'s globals; patching
    must replace it and restore it when the context exits."""
    assert _test_patching.open is open
    # BUG FIX: the original bound the mock to a shadowed obfuscated name and
    # then passed the undefined name ``A__`` to patch_submodule.
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def _lowerCAmelCase ( ):
    """Patching an attribute absent from the module (pandas.read_csv is not
    imported by ``_test_patching``) must be a silent no-op, not an error."""
    # pandas.read_csv is not present in _test_patching
    # BUG FIX: the original passed the undefined name ``A__`` as the mock.
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
        pass
def _lowerCAmelCase ( ):
    """Builtins must be mocked even when absent from the module globals, since
    they may be looked up lazily at call time."""
    # BUG FIX: the original passed the undefined name ``A__`` both as the
    # getattr default (should be None) and as the mock object.
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    # outside the patch, attribute lookup falls through to the builtin
    assert _test_patching.len is len
def _lowerCAmelCase ( ):
    """A patch_submodule object can also be driven manually via start()/stop()."""
    # BUG FIX: the original bound both the mock and the patch object to the
    # same shadowed name and then used the undefined names ``patch``/``A__``.
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def _lowerCAmelCase ( ):
    """Nested patches over overlapping attribute paths (os.path.join,
    os.rename, os.path.dirname) must compose in either nesting order and
    unwind cleanly."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    # BUG FIX: the original bound all three mocks to one shadowed name and
    # passed the undefined name ``A__`` to every patch_submodule call.
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def _lowerCAmelCase ( ):
    """Patching attribute paths that don't exist at all must not raise."""
    # BUG FIX: the original passed the undefined name ``A__`` as the mock.
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
        pass
| 622
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
    """Configuration for the BLIP-2 vision encoder.

    BUG FIX: the original ``__init__`` and classmethod reused one obfuscated
    identifier for every parameter — a SyntaxError — and dropped the tuple
    returned by ``get_config_dict``.  Parameter names are reconstructed from
    the attribute assignments (verify against upstream Blip2VisionConfig).
    The ``model_type`` class-attribute name is restored because the
    classmethod below reads ``cls.model_type``.
    """

    model_type = '''blip_2_vision_model'''

    def __init__(
        self,
        hidden_size=14_08,
        intermediate_size=61_44,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=2_24,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the encoder hyper-parameters; unknown kwargs go to the base config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def lowerCamelCase_ ( cls , pretrained_model_name_or_path , **kwargs ):
        """Load this sub-config from a checkpoint; when the checkpoint holds a
        full ``blip-2`` config, descend into its ``vision_config`` section."""
        cls._set_token_in_kwargs(kwargs )
        # BUG FIX: get_config_dict returns (config_dict, kwargs); the original
        # bound the tuple to a throwaway name and then used ``config_dict``.
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class UpperCamelCase__(lowercase_):
    """Configuration for the BLIP-2 Q-Former (a BERT-style cross-attention module)."""

    # ``from_pretrained`` below compares against ``cls.model_type``; the mangled
    # attribute name prevented that from working.
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=14_08,
        **kwargs,
    ):
        """Store Q-Former hyper-parameters; ``pad_token_id`` is forwarded to the base config."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Every ``cross_attention_frequency``-th layer cross-attends to the
        # vision encoder, whose width is ``encoder_hidden_size``.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this Q-Former config, unwrapping it from a composite blip-2 config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from a composite blip-2 config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class UpperCamelCase__(lowercase_):
    """Composite BLIP-2 configuration: vision tower + Q-Former + language model."""

    model_type = "blip-2"
    # This config is composed of three sub-configs.
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """Build the composite config from (possibly missing) sub-config dicts.

        The original body read parameter names (``vision_config`` etc.) that the
        mangled signature never defined — all four parameters were named
        ``lowerCamelCase_`` (a SyntaxError).
        """
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        # NOTE(review): ``BlipaVisionConfig`` / ``BlipaQFormerConfig`` appear to be
        # the two config classes defined above under mangled names — confirm the
        # bindings when de-obfuscating the rest of the file.
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision tower, so its encoder width
        # must track the vision hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Alternate constructor from three instantiated sub-configs.

        Renamed from the mangled ``lowerCamelCase_``, which collided with (and was
        shadowed by) the ``to_dict`` method below.
        """
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, recursing into the three sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 721
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase__(lowercase_):
    """Configuration for DeBERTa-v2 models."""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=12_81_00,
        hidden_size=15_36,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=61_44,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        """Store the DeBERTa-v2 hyper-parameters.

        The original signature declared every parameter under the same mangled
        name (a SyntaxError) while the body read the real names; this restores
        the parameter list matching the defaults and body assignments.
        """
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: older checkpoints store pos_att_type as a
        # "|"-separated string rather than a list.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class UpperCamelCase__(lowercase_):
    """ONNX export configuration for DeBERTa-v2.

    The two properties and the method were all mangled to the same name
    (``lowerCamelCase_``), so only the last definition survived; the ONNX
    machinery requires them to be ``inputs``, ``default_onnx_opset`` and
    ``generate_dummy_inputs``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # token_type_ids only exist when the model uses segment embeddings.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset this export requires."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer=None,
    ):
        """Build dummy inputs, dropping token_type_ids when the config has none."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 79
| 0
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the fractional part of *number*.

    If ``digit_amount`` > 0 the fractional part is rounded to that many
    decimal places; otherwise the raw fractional part is returned.

    The original definition declared both parameters as ``_A`` (a
    SyntaxError), read undefined names in the body, and the callers below
    referenced ``decimal_isolate`` which was never defined.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


# Backward-compatible alias for the previous (mangled) public name.
UpperCamelCase__ = decimal_isolate


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 479
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCamelCase_(datasets.BeamBasedBuilder):
    """Dummy Beam-based builder yielding three flat ``{"content": str}`` examples.

    The three builder hooks were all mangled to one method name (so only the
    last survived) with duplicate parameter names; ``BeamBasedBuilder``
    requires ``_info`` / ``_split_generators`` / ``_build_pcollection``.
    """

    def _info(self):
        # Schema: a single string column named "content"; no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class UpperCamelCase_(datasets.BeamBasedBuilder):
    """Dummy Beam-based builder with a nested feature ``{"a": {"b": [str]}}``.

    Restores the required ``_info`` / ``_split_generators`` /
    ``_build_pcollection`` hook names and distinct parameter names.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return the three flat ``(key, {"content": str})`` examples used by the dummy builder.

    Renamed from the mangled ``UpperCamelCase__``: every caller in this file
    references ``get_test_dummy_examples``, which was otherwise undefined.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backward-compatible alias for the previous (mangled) name.
UpperCamelCase__ = get_test_dummy_examples
def get_test_nested_examples():
    """Return the three nested ``(key, {"a": {"b": [str]}})`` examples used by the nested builder.

    Renamed from the mangled ``UpperCamelCase__``: callers reference
    ``get_test_nested_examples``, which was otherwise undefined.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backward-compatible alias for the previous (mangled) name.
UpperCamelCase__ = get_test_nested_examples
class UpperCamelCase_(TestCase):
    """End-to-end tests for the Beam-based builders above, run on the DirectRunner.

    Restored from heavy mangling: the base class was the undefined
    ``__UpperCamelCase`` (``TestCase`` is imported above), all four test
    methods shared one name (so only the last was collected), and the bodies
    read undefined locals. The sharded test also asserted shard 00000 twice
    instead of checking both shards.
    """

    # NOTE(review): ``DummyBeamDataset`` / ``NestedBeamDataset`` appear to be
    # the two builder classes defined above under mangled names — confirm the
    # bindings when de-obfuscating the rest of the file.

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force the parquet writer to split the train split into 2 shards.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Fix: the original checked shard 00000 twice and never shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, download_and_prepare must refuse to run.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 479
| 1
|
def A(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the EMI (equated monthly installment) for a loan.

    Args:
        principal: amount borrowed; must be > 0.
        rate_per_annum: yearly interest rate as a fraction (e.g. 0.12); must be >= 0.
        years_to_repay: repayment period in whole years; must be an int > 0.

    Raises:
        Exception: if any argument is out of range.

    The original declared all three parameters as ``lowercase`` (a SyntaxError)
    while the body read the real names restored here.
    """
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 3
|
from random import shuffle
import tensorflow as tf
from numpy import array
def A(vectors, noofclusters):
    """K-means clustering on the TensorFlow 1.x graph API.

    Args:
        vectors: sequence of equal-length numeric vectors to cluster.
        noofclusters: number of clusters k; must be < len(vectors).

    Returns:
        (centroids, assignments): final centroid locations and the cluster
        index assigned to each input vector.

    The original declared both parameters as ``lowercase`` (a SyntaxError) and
    assigned every local to ``UpperCamelCase`` while reading the real names;
    this restores the name mapping implied by the body (including the two
    distance placeholders, which were both fed under one name ``va``).

    NOTE(review): uses TF1-only APIs (``tf.Session``, ``tf.sub``,
    ``tf.initialize_all_variables``) — requires TensorFlow 1.x or a compat shim.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm, so repeated calls don't crowd the default graph
    # with unused ops and Variables.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        # One Variable per centroid, initialized to a random input vector.
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        # Nodes to assign new values to the centroid Variables.
        centroid_value = tf.placeholder('float64', [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        # Cluster assignment Variable for each input vector (initially 0).
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        # Nodes to assign new values to the assignment Variables.
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        # Mean op: reduces a batch of input vectors along dimension 0.
        mean_input = tf.placeholder('float', [None, dim])
        mean_op = tf.reduce_mean(mean_input, 0)
        # Euclidean distance between two vectors.
        va = tf.placeholder('float', [dim])
        vb = tf.placeholder('float', [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        # Picks the nearest centroid given the vector of distances.
        centroid_distances = tf.placeholder('float', [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        # Initialize all Variables defined above (must come after all of them).
        init_op = tf.initialize_all_variables()
        sess.run(init_op)
        # CLUSTERING ITERATIONS: fixed number of EM sweeps instead of a
        # stopping criterion, to keep things simple.
        noofiterations = 100
        for _ in range(noofiterations):
            # EXPECTATION STEP: assign each vector to its nearest centroid.
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Distance from this vector to each current centroid.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            # MAXIMIZATION STEP: move each centroid to the mean of its members.
            for cluster_n in range(noofclusters):
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 3
| 1
|
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first *n* natural numbers.

    The original body read an undefined ``n``, assigned both intermediates to
    ``_A``, and returned undefined names; the ``__main__`` block calls
    ``solution``, which was never defined.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


# Backward-compatible alias for the previous (mangled) name.
__lowerCAmelCase = solution


if __name__ == "__main__":
    print(f"{solution() = }")
| 27
|
def lowerCAmelCase_(number: int) -> int:
    """Count the set bits in a non-negative integer (Brian Kernighan's algorithm).

    Args:
        number: the value whose 1-bits are counted.

    Raises:
        ValueError: if *number* is not a non-negative integer.

    The original read an undefined ``number`` and tested
    ``isinstance(__a, __a)``; this restores the intended parameter name and
    ``isinstance(number, int)`` check.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError('''Input must be a non-negative integer''')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 258
| 0
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _lowerCAmelCase(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it.

    Fixes the duplicate parameter name ``_lowerCAmelCase`` (a SyntaxError).
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def _lowerCAmelCase(device, conf_path=None, ckpt_path=None):
    """Instantiate a VQModel from a YAML config, load weights, and move it to *device*.

    Fixes duplicate parameter names (a SyntaxError); the parameter roles are
    inferred from the body (config path defaulting, checkpoint loading with
    ``map_location``, and ``model.to(...)``).
    """
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    # NOTE(review): strict=True assumed — confirm against the original caller.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def _lowerCAmelCase(x, model):
    """Encode then decode *x* through the VQGAN *model*; return the reconstruction.

    Fixes duplicate parameter names (a SyntaxError) and restores the
    three-way unpack of ``model.encode`` implied by the original body.
    """
    z, _, _ = model.encode(x)
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=False ):
'''simple docstring'''
A_ , A_ : int = string.rsplit(""".""" ,1 )
if reload:
A_ : Dict = importlib.import_module(_lowerCAmelCase )
importlib.reload(_lowerCAmelCase )
return getattr(importlib.import_module(_lowerCAmelCase ,package=_lowerCAmelCase ) ,cls )
def _lowerCAmelCase(config):
    """Instantiate the object named by ``config["target"]`` with ``config["params"]``.

    Raises:
        KeyError: if the config has no ``target`` key.
    """
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""")
    return get_obj_from_str(config["""target"""])(**config.get("""params""", {}))
def _lowerCAmelCase(config, sd, gpu=True, eval_mode=True):
    """Build a model from *config*, optionally load state dict *sd*, move to GPU / eval.

    Fixes duplicate parameter names (a SyntaxError); the roles follow the
    body's ``load_state_dict`` / ``cuda`` / ``eval`` calls.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def _lowerCAmelCase(config, ckpt, gpu, eval_mode):
    """Load a Lightning checkpoint (or a fresh model when *ckpt* is falsy).

    Returns:
        (model, global_step): the loaded model and the checkpoint's global
        step (``None`` when no checkpoint was given).
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""")
        global_step = pl_sd["""global_step"""]
        print(f"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=gpu, eval_mode=eval_mode)["""model"""]
    return model, global_step
| 481
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase(coefficient_matrix, constant_matrix, init_val, iterations):
    """Approximate the solution of ``Ax = b`` with the Jacobi iteration method.

    Args:
        coefficient_matrix: square (n x n) numpy array A.
        constant_matrix: (n x 1) numpy array b.
        init_val: length-n sequence of initial guesses.
        iterations: number of Jacobi sweeps to perform (>= 1).

    Returns:
        list[float]: the approximate solution after *iterations* sweeps.

    Raises:
        ValueError: on any dimension mismatch, wrong init length,
            non-positive iterations, or a non-diagonally-dominant matrix.

    Fixes duplicate parameter names (a SyntaxError) and restores the locals
    the body reads.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")
    # Augmented matrix [A | b]: the last column holds the constants.
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ , A_ : str = table.shape
A_ : Any = True
for i in range(0 ,_lowerCAmelCase ):
A_ : Optional[Any] = 0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 481
| 1
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase__:
    """Stub whose method takes the model arguments in their natural contiguous order.

    Only the signature matters (it is inspected by ``ensure_valid_input``);
    the original declared all three parameters as ``__A`` (a SyntaxError).
    """

    def snake_case(self, input_ids, token_type_ids, attention_mask):
        """Dummy forward-like method; always returns None."""
        return None
class UpperCamelCase__:
    """Stub whose method interleaves an extra argument between the model inputs.

    Only the signature matters (it is inspected by ``ensure_valid_input``);
    the original declared all four parameters as ``__A`` (a SyntaxError).
    """

    def snake_case(self, input_ids, some_other_args, token_type_ids, attention_mask):
        """Dummy forward-like method; always returns None."""
        return None
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A , "tf" , 1_2 , **_A )
@require_torch
@slow
def snake_case ( self : str ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A , "pt" , 1_2 , **_A )
@require_torch
@slow
def snake_case ( self : Tuple ):
"""simple docstring"""
from transformers import BertModel
_lowercase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(_A ) )
vocab_file.flush()
_lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowercase = BertModel(BertConfig(vocab_size=len(_A ) ) )
model.save_pretrained(_A )
self._test_export(_A , "pt" , 1_2 , _A )
@require_tf
@slow
def snake_case ( self : Tuple ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowercase = self._test_export(_A , "tf" , 1_2 , **_A )
_lowercase = quantize(Path(_A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowercase = self._test_export(_A , "pt" , 1_2 , **_A )
_lowercase = quantize(_A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def snake_case ( self : List[str] , __A : Optional[int] , __A : Optional[int] , __A : Optional[Any] , __A : Dict=None , **__A : List[Any] ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowercase = Path(_A ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_A , _A , _A , _A , _A , **_A )
return path
except Exception as e:
self.fail(_A )
@require_torch
@require_tokenizers
@slow
def snake_case ( self : str ):
"""simple docstring"""
from transformers import BertModel
_lowercase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_lowercase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_A , _A , "pt" )
@require_tf
@require_tokenizers
@slow
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
from transformers import TFBertModel
_lowercase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_lowercase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_A , _A , "tf" )
def snake_case ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : Dict ):
"""simple docstring"""
_lowercase = FeatureExtractionPipeline(_A , _A )
_lowercase = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
_lowercase = infer_shapes(_A , _A )
# Assert all variables are present
self.assertEqual(len(_A ) , len(_A ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , _A )
self.assertSequenceEqual(variable_names[3:] , _A )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = ["input_ids", "attention_mask", "token_type_ids"]
_lowercase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_lowercase = ensure_valid_input(FuncContiguousArgs() , _A , _A )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_A ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(_A ) , set(_A ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_A , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_lowercase = ensure_valid_input(FuncNonContiguousArgs() , _A , _A )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_A ) , 1 )
self.assertEqual(len(_A ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def snake_case ( self : List[Any] ):
"""simple docstring"""
_lowercase = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 497
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# Bug fix: for pickle to resolve classes stored in old corpus pickles the
# aliases must be installed on the module object and in sys.modules; the
# previous code only rebound one throwaway local name four times.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed dataset to PyTorch.

    Bug fix: the function was defined as ``_snake_case`` with four identically
    named parameters, while the ``__main__`` block calls
    ``convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
    transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file)``
    and the body reads exactly those names — restored name and signature.

    Args:
        tf_checkpoint_path: Optional TF checkpoint to convert ("" to skip).
        transfo_xl_config_file: Optional config json ("" to use defaults).
        pytorch_dump_folder_path: Destination folder for the converted files.
        transfo_xl_dataset_file: Optional pre-processed corpus pickle ("" to skip).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # Bug fix: the parser and parsed namespace were bound to a throwaway name
    # while subsequent lines read the undefined ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 216
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _snake_case(PretrainedConfig):
    """Configuration for a SEW-D model (defaults match ``asapp/sew-d-tiny-100k``).

    Bug fix: the original ``__init__`` declared ~40 parameters all literally
    named ``SCREAMING_SNAKE_CASE_`` (a duplicate-argument SyntaxError) and
    assigned every value to one throwaway local, while the body and the
    validation/ratio code read the real attribute names; the base class and
    ``model_type`` attribute were likewise mangled. Parameter names are
    reconstructed from the attributes the body reads — defaults are unchanged.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 706
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _snake_case(PipelineTool):
    """Tool that answers an English question about an image using a ViLT VQA model.

    Bug fixes: the base class was the undefined placeholder ``UpperCAmelCase_``
    (should be ``PipelineTool``, imported above); every class attribute was named
    ``__lowerCAmelCase`` so only the last survived; the three pipeline methods
    were all named ``lowercase__`` although ``PipelineTool`` dispatches on
    ``encode``/``forward``/``decode``; and ``decode`` read the non-existent
    ``config.idalabel`` instead of ``config.id2label``.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is needed to handle the image input.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, question):
        """Tokenize the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients and return the logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit back to its answer string."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 495
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCAmelCase__(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around ``tqdm`` that, by default, only displays the progress bar
    on the main local process.

    Bug fixes: the original declared three parameters all named ``lowercase``
    (a duplicate-argument SyntaxError) while the body read ``main_process_only``;
    the disable flag also used ``== 0``, which would have hidden the bar on the
    main process and shown it everywhere else.

    Args:
        main_process_only: When True, disable the bar on non-main processes.
        *args, **kwargs: Forwarded to ``tqdm.auto.tqdm``.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Only the local main process (index 0) should render the bar.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 243
|
import tensorflow as tf
from ...tf_utils import shape_list
class _lowerCamelCase ( tf.keras.layers.Layer ):
    """Adaptive softmax output layer (Transformer-XL style) with cutoff-based
    vocabulary clusters and optional per-cluster embedding projections.

    NOTE(review): this block was mangled by an obfuscation pass — every
    constructor/method parameter is literally named ``UpperCAmelCase`` (a
    duplicate-argument SyntaxError) and every local is bound to ``__snake_case``
    while later lines read the real names (``vocab_size``, ``d_embed``,
    ``d_proj``, ``cutoffs``, ``div_val``, ``keep_order``, ``weight``, ``bias``,
    ``mask``, ``cur_W``, ``cur_b``, ...). The code is left byte-identical here;
    the real identifiers must be restored before this layer can run.
    """
    def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=False , **UpperCAmelCase ) -> List[Any]:
        '''Store sizes, cutoffs and bookkeeping; weights are created in build().'''
        super().__init__(**UpperCAmelCase )
        # Attributes read later: vocab_size, d_embed, d_proj, cutoffs (+ final
        # vocab_size sentinel), cutoff_ends, div_val, shortlist_size, n_clusters,
        # head_size, keep_order, out_projs, out_layers.
        __snake_case : int = vocab_size
        __snake_case : str = d_embed
        __snake_case : List[Any] = d_proj
        __snake_case : List[str] = cutoffs + [vocab_size]
        __snake_case : str = [0] + self.cutoffs
        __snake_case : Union[str, Any] = div_val
        __snake_case : List[str] = self.cutoffs[0]
        __snake_case : Any = len(self.cutoffs ) - 1
        __snake_case : Dict = self.shortlist_size + self.n_clusters
        __snake_case : Dict = keep_order
        __snake_case : List[str] = []
        __snake_case : Union[str, Any] = []
    def UpperCAmelCase ( self , UpperCAmelCase ) -> Union[str, Any]:
        '''Create cluster weights plus one (projection, weight, bias) triple per
        cutoff segment; with div_val == 1 all segments share the full embedding
        size, otherwise each tail segment is progressively smaller.'''
        if self.n_clusters > 0:
            __snake_case : Optional[int] = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=UpperCAmelCase , name="cluster_weight" )
            __snake_case : Any = self.add_weight(
                shape=(self.n_clusters,) , initializer="zeros" , trainable=UpperCAmelCase , name="cluster_bias" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    __snake_case : Dict = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_projs_._{i}""" , )
                    self.out_projs.append(UpperCAmelCase )
                else:
                    self.out_projs.append(UpperCAmelCase )
                __snake_case : Optional[int] = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
                __snake_case : int = self.add_weight(
                    shape=(self.vocab_size,) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                __snake_case , __snake_case : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Tail cluster i uses an embedding size shrunk by div_val**i.
                __snake_case : List[Any] = self.d_embed // (self.div_val**i)
                __snake_case : Optional[int] = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_projs_._{i}""" )
                self.out_projs.append(UpperCAmelCase )
                __snake_case : Any = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
                __snake_case : Optional[int] = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="zeros" , trainable=UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        super().build(UpperCAmelCase )
    @staticmethod
    def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
        '''Compute logits ``x @ W^T + b`` with an optional projection of x first.'''
        __snake_case : List[Any] = x
        if proj is not None:
            __snake_case : List[str] = tf.einsum("ibd,ed->ibe" , UpperCAmelCase , UpperCAmelCase )
        return tf.einsum("ibd,nd->ibn" , UpperCAmelCase , UpperCAmelCase ) + b
    @staticmethod
    def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Dict:
        '''Gather each target id's log-probability from a (len, vocab) tensor.'''
        __snake_case : Any = shape_list(UpperCAmelCase )
        __snake_case : Optional[int] = tf.range(lp_size[0] , dtype=target.dtype )
        __snake_case : Union[str, Any] = tf.stack([r, target] , 1 )
        return tf.gather_nd(UpperCAmelCase , UpperCAmelCase )
    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False ) -> str:
        '''Return per-token log-probabilities; when targets are given, also
        accumulate the (optionally mean-reduced) NLL loss via add_loss().'''
        __snake_case : Optional[int] = 0
        if self.n_clusters == 0:
            # No tail clusters: plain softmax over the whole vocabulary.
            __snake_case : int = self._logit(UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                __snake_case : Dict = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCAmelCase , logits=UpperCAmelCase )
            __snake_case : int = tf.nn.log_softmax(UpperCAmelCase , axis=-1 )
        else:
            __snake_case : Optional[int] = shape_list(UpperCAmelCase )
            __snake_case : List[Any] = []
            __snake_case : str = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                __snake_case , __snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Tokens whose target id falls in this cluster's id range.
                    __snake_case : Optional[Any] = (target >= l_idx) & (target < r_idx)
                    __snake_case : Union[str, Any] = tf.where(UpperCAmelCase )
                    __snake_case : Optional[Any] = tf.boolean_mask(UpperCAmelCase , UpperCAmelCase ) - l_idx
                if self.div_val == 1:
                    __snake_case : Dict = self.out_layers[0][0][l_idx:r_idx]
                    __snake_case : int = self.out_layers[0][1][l_idx:r_idx]
                else:
                    __snake_case : Union[str, Any] = self.out_layers[i][0]
                    __snake_case : Optional[int] = self.out_layers[i][1]
                if i == 0:
                    # Head: shortlist words plus one pseudo-logit per tail cluster.
                    __snake_case : Any = tf.concat([cur_W, self.cluster_weight] , 0 )
                    __snake_case : str = tf.concat([cur_b, self.cluster_bias] , 0 )
                    __snake_case : Dict = self._logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.out_projs[0] )
                    __snake_case : List[Any] = tf.nn.log_softmax(UpperCAmelCase )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        __snake_case : int = tf.boolean_mask(UpperCAmelCase , UpperCAmelCase )
                        __snake_case : List[str] = self._gather_logprob(UpperCAmelCase , UpperCAmelCase )
                else:
                    __snake_case : int = self._logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.out_projs[i] )
                    __snake_case : Optional[Any] = tf.nn.log_softmax(UpperCAmelCase )
                    __snake_case : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    __snake_case : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(UpperCAmelCase )
                    if target is not None:
                        __snake_case : Optional[Any] = tf.boolean_mask(UpperCAmelCase , UpperCAmelCase )
                        __snake_case : Optional[int] = tf.boolean_mask(UpperCAmelCase , UpperCAmelCase )
                        __snake_case : Optional[int] = self._gather_logprob(UpperCAmelCase , UpperCAmelCase )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter this cluster's NLL contributions back to (len, bsz).
                    loss += tf.scatter_nd(UpperCAmelCase , -cur_logprob , shape_list(UpperCAmelCase ) )
            __snake_case : Dict = tf.concat(UpperCAmelCase , axis=-1 )
        if target is not None:
            if return_mean:
                __snake_case : int = tf.reduce_mean(UpperCAmelCase )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(UpperCAmelCase )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(UpperCAmelCase , name=self.name , aggregation="mean" if return_mean else "" )
        return out
| 243
| 1
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    """Minimal two-cluster self-organizing map (Kohonen network).

    Bug fixes: the class was named ``A__`` and both methods ``a__`` although the
    driver below instantiates ``SelfOrganizingMap()`` and calls
    ``get_winner``/``update``; in ``get_winner`` both squared distances were
    accumulated into a single variable, making the comparison ``da > da``
    (always False) and leaving a second ``return`` unreachable.
    """

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the "winning" weight vector for *sample*.

        Compares the squared Euclidean distances to ``weights[0]`` and
        ``weights[1]``; returns 0 when d0 > d1, else 1 (upstream convention).
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Move components of winning vector *j* toward *sample* by rate *alpha*.

        NOTE(review): iterates ``range(len(weights))`` (number of clusters), not
        the sample length — preserved from the original upstream implementation.
        """
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """Train the two-cluster SOM on four samples and classify a test sample.

    Bug fixes: the function was named ``snake_case`` while the ``__main__``
    guard calls ``main()``, and every local was bound to a single throwaway
    name while later lines read ``training_samples``/``weights``/``winner``.
    """
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
    """Helper that builds tiny ESM configs and inputs for the model tests below.

    NOTE(review): obfuscation damage — the constructor declares ~23 parameters
    all literally named ``__a`` (a duplicate-argument SyntaxError) while the
    body reads the real names (parent, batch_size, seq_length, ...); every
    method is named ``a__`` so later definitions shadow earlier ones, and each
    local is bound to ``lowerCamelCase`` while later lines read the real names.
    The sibling test class refers to this class as ``EsmModelTester``.
    """
    def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
        # Store every hyper-parameter on self (real names per the RHS below).
        lowerCamelCase : Union[str, Any] = parent
        lowerCamelCase : Tuple = batch_size
        lowerCamelCase : Any = seq_length
        lowerCamelCase : Any = is_training
        lowerCamelCase : Tuple = use_input_mask
        lowerCamelCase : int = use_token_type_ids
        lowerCamelCase : List[str] = use_labels
        lowerCamelCase : Optional[int] = vocab_size
        lowerCamelCase : Tuple = hidden_size
        lowerCamelCase : List[str] = num_hidden_layers
        lowerCamelCase : Optional[int] = num_attention_heads
        lowerCamelCase : Optional[Any] = intermediate_size
        lowerCamelCase : Optional[Any] = hidden_act
        lowerCamelCase : Union[str, Any] = hidden_dropout_prob
        lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
        lowerCamelCase : Any = max_position_embeddings
        lowerCamelCase : str = type_vocab_size
        lowerCamelCase : List[Any] = type_sequence_label_size
        lowerCamelCase : Optional[Any] = initializer_range
        lowerCamelCase : Union[str, Any] = num_labels
        lowerCamelCase : Optional[Any] = num_choices
        lowerCamelCase : Any = scope
    def a__ ( self: Optional[int] )-> List[Any]:
        '''Build random input ids, optional mask/labels, and a config.'''
        lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : Dict = None
        if self.use_input_mask:
            lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase : Any = None
        lowerCamelCase : int = None
        lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : List[str] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def a__ ( self: Tuple )-> Union[str, Any]:
        '''Return an EsmConfig built from the stored hyper-parameters.'''
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
        '''Forward EsmModel with/without mask and check output shapes.'''
        lowerCamelCase : Optional[int] = EsmModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a )
        lowerCamelCase : str = model(__a )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
        '''Forward EsmForMaskedLM and check the logits shape.'''
        lowerCamelCase : str = EsmForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
        '''Forward EsmForTokenClassification and check the logits shape.'''
        lowerCamelCase : Tuple = self.num_labels
        lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def a__ ( self: Optional[int] )-> Optional[int]:
        '''Unpack prepare_config_and_inputs into the common (config, dict) form.'''
        lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Tuple = config_and_inputs
        lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common-suite tests for the ESM model family.

    NOTE(review): obfuscation damage — both mixin bases are the undefined
    placeholder ``__lowercase`` (and duplicated, which is a TypeError at class
    creation; presumably ModelTesterMixin and PipelineTesterMixin, imported
    above — confirm); the class attributes are all named ``snake_case__`` and
    the test methods all ``a__``, so later definitions shadow earlier ones;
    ``EsmModelTester`` refers to the tester class defined above (named ``A__``
    here). Code left byte-identical.
    """
    snake_case__ : Any =False
    snake_case__ : Dict =(
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Dict =()
    snake_case__ : Optional[int] =(
        {
            '''feature-extraction''': EsmModel,
            '''fill-mask''': EsmForMaskedLM,
            '''text-classification''': EsmForSequenceClassification,
            '''token-classification''': EsmForTokenClassification,
            '''zero-shot''': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : Any =True
    def a__ ( self: Optional[int] )-> Optional[int]:
        '''Set up the model tester and config tester.'''
        lowerCamelCase : Optional[Any] = EsmModelTester(self )
        lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
    def a__ ( self: List[Any] )-> Optional[Any]:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def a__ ( self: int )-> Optional[Any]:
        '''Forward-pass shape test for the base model.'''
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def a__ ( self: Tuple )-> Any:
        '''Exercise each position-embedding variant.'''
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase : Tuple = type
            self.model_tester.create_and_check_model(*__a )
    def a__ ( self: List[str] )-> Tuple:
        '''Masked-LM head shape test.'''
        lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )
    def a__ ( self: int )-> Optional[Any]:
        '''Token-classification head shape test.'''
        lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )
    @slow
    def a__ ( self: Any )-> List[Any]:
        '''Smoke-test loading the first pretrained checkpoint.'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : int = EsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
    def a__ ( self: str )-> List[str]:
        '''Position ids derived from input ids must skip padding positions.'''
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
        lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        lowerCamelCase : Union[str, Any] = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
    def a__ ( self: Optional[int] )-> int:
        '''Position ids derived from inputs_embeds must be sequential after pad.'''
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Any = EsmEmbeddings(config=__a )
        lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
        lowerCamelCase : List[Any] = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
        lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Any )-> Optional[Any]:
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Dict )-> Dict:
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def a__ ( self: List[str] )-> Dict:
        pass
@require_torch
class A__ ( __lowercase):
    """Integration tests running the real pretrained esm2_t6_8M_UR50D checkpoint.

    NOTE(review): obfuscation damage — the base is the undefined placeholder
    ``__lowercase`` (presumably TestCasePlus, imported above — confirm) and both
    methods are named ``a__`` (the second shadows the first); locals are bound
    to ``lowerCamelCase`` while later lines read ``model``/``output`` etc.
    """
    @slow
    def a__ ( self: Any )-> Union[str, Any]:
        '''Masked-LM forward pass: check logits shape and a known slice.'''
        with torch.no_grad():
            lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            lowerCamelCase : Tuple = model(__a )[0]
            lowerCamelCase : Dict = 33
            lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , __a )
            lowerCamelCase : Tuple = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
    @slow
    def a__ ( self: Dict )-> str:
        '''Base-model forward pass: compare a slice of the hidden states.'''
        with torch.no_grad():
            lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            lowerCamelCase : Any = model(__a )[0]
            # compare the actual values for a slice.
            lowerCamelCase : Tuple = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class lowerCamelCase__ ( PretrainedConfig):
    """Configuration for an EnCodec-style neural audio codec.

    NOTE(review): the original subclassed the undefined name `_A` (the file
    imports `PretrainedConfig`, which `super().__init__(**kwargs)` requires),
    declared every `__init__` parameter under one duplicate name (a
    SyntaxError) while the body already used the real names, and defined all
    four properties under a single name so internal references such as
    ``self.chunk_length`` and ``self.frame_rate`` could never resolve. This
    rewrite restores the names the body itself uses; defaults are unchanged.
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # supported bandwidths, in kbps
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        # NOTE: the mutable list defaults mirror the original public defaults
        # and are never mutated here, so they are kept for compatibility.
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Fall back to the model width when no explicit codebook dimension is given.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when no chunking is configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride between chunks in samples, or None when chunking/overlap is unset."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        """Number of codec frames per second of audio."""
        # Total downsampling factor is the product of the upsampling ratios.
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        """Number of residual quantizers needed for the largest target bandwidth."""
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 2
|
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    """Convert an `efficient_mlm` RoBERTa-PreLayerNorm checkpoint to Transformers format.

    NOTE(review): the original declared both parameters under one duplicate
    name (a SyntaxError) and rebound every intermediate to a single mangled
    local, so the converted state dict was never keyed; this restores the
    names the `__main__` block (which calls this function) expects.

    Args:
        checkpoint_repo: Hub repo id of the official dump,
            e.g. 'andreasmadsen/efficient_mlm_m0.40'.
        pytorch_dump_folder_path: Output directory for the converted model
            and tokenizer.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    # NOTE(review): torch.load unpickles the downloaded file — acceptable here
    # only because the checkpoint source is explicitly chosen by the user.
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta." ):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser to a mangled module name
    # and then read undefined `parser`/`args`; bind them properly here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 310
| 0
|
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key.

    NOTE(review): the original defined both parameters under one duplicate
    name (a SyntaxError) and passed that placeholder to every call; the
    `__main__` block calls this function as ``bbaa``, which grounds the name.

    Args:
        key_len: Desired key length in bits.
        seed: Optional seed for both the RNG and the simulator, for
            reproducible keys.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    # Demo: generate an 8-bit key from a fixed seed, then run module doctests.
    print(F'''The generated key is : {bbaa(8, seed=0)}''')
    from doctest import testmod
    testmod()
| 680
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(monkeypatch, dataset_size, input_in_memory_max_size):
    """`is_small_dataset` is True only when a size fits under a non-zero in-memory cap.

    NOTE(review): the original declared all three parameters under one
    duplicate name (a SyntaxError) while the body already used `monkeypatch`;
    renamed to ``test_is_small_dataset`` so pytest collects it.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    # Sanity-check that the monkeypatch (or the library default of 0) took effect.
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are truthy and it fits under the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 680
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Fast (tokenizers-backed) RoBERTa tokenizer with byte-level BPE.

    NOTE(review): this block is heavily name-mangled and is left byte-identical:
    the base name `lowerCAmelCase` is undefined here, `__init__` repeats one
    parameter name (a SyntaxError), the five `__A` class attributes overwrite
    each other, every method shares the name `__UpperCAmelCase`, and
    `@mask_token.setter` references a property that was never defined under
    that name — all presumably `add_prefix_space`, `mask_token`, etc. in the
    original file; verify against upstream before restoring.
    """
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    __A = RobertaTokenizer
    def __init__( self : List[Any] , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : List[str]="replace" , lowercase_ : Optional[Any]="<s>" , lowercase_ : int="</s>" , lowercase_ : Dict="</s>" , lowercase_ : Optional[Any]="<s>" , lowercase_ : Any="<unk>" , lowercase_ : Tuple="<pad>" , lowercase_ : Dict="<mask>" , lowercase_ : int=False , lowercase_ : List[Any]=True , **lowercase_ : Optional[int] , ) -> Optional[Any]:
        """Build the fast tokenizer and re-sync `add_prefix_space`/`trim_offsets` into the backend pre-tokenizer and post-processor."""
        super().__init__(
            lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
        # NOTE(review): the result below is read as `pre_tok_state` two lines
        # later, but is bound to the mangled name — broken as written.
        _UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , lowercase_) != add_prefix_space:
            _UpperCamelCase = getattr(lowercase_ , pre_tok_state.pop("type"))
            _UpperCamelCase = add_prefix_space
            _UpperCamelCase = pre_tok_class(**lowercase_)
        _UpperCamelCase = add_prefix_space
        # Rebuild the post-processor if its settings drifted from the requested ones.
        _UpperCamelCase = "post_processor"
        _UpperCamelCase = getattr(self.backend_tokenizer , lowercase_ , lowercase_)
        if tokenizer_component_instance:
            _UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _UpperCamelCase = tuple(state["sep"])
            if "cls" in state:
                _UpperCamelCase = tuple(state["cls"])
            _UpperCamelCase = False
            if state.get("add_prefix_space" , lowercase_) != add_prefix_space:
                _UpperCamelCase = add_prefix_space
                _UpperCamelCase = True
            if state.get("trim_offsets" , lowercase_) != trim_offsets:
                _UpperCamelCase = trim_offsets
                _UpperCamelCase = True
            if changes_to_apply:
                _UpperCamelCase = getattr(lowercase_ , state.pop("type"))
                _UpperCamelCase = component_class(**lowercase_)
                setattr(self.backend_tokenizer , lowercase_ , lowercase_)
    @property
    def __UpperCAmelCase ( self : int) -> str:
        """Return the mask token string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    # NOTE(review): `mask_token` is undefined at class-body level here — this
    # setter decorator only works if the property above is named `mask_token`.
    @mask_token.setter
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Optional[Any]) -> Dict:
        """Set the mask token, wrapping plain strings so they keep a leading space (lstrip)."""
        _UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else value
        _UpperCamelCase = value
    def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : int) -> BatchEncoding:
        """Batch-encode; pre-tokenized input requires `add_prefix_space=True`."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , lowercase_)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : str , *lowercase_ : Optional[int] , **lowercase_ : Dict) -> BatchEncoding:
        """Encode a single example; same `add_prefix_space` requirement as above."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , lowercase_)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the vocabulary/merges files of the backend model to a directory."""
        _UpperCamelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_)
        return tuple(lowercase_)
    def __UpperCAmelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str=None) -> Union[str, Any]:
        """Add special tokens: <s> x </s> for one sequence, <s> a </s></s> b </s> for a pair."""
        _UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Return an all-zero token-type-id list (RoBERTa does not use segment ids)."""
        _UpperCamelCase = [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 547
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
    """Helper that builds small Flax BigBird configs and inputs for the tests below.

    NOTE(review): left byte-identical — `__init__` repeats one parameter name
    (a SyntaxError) while the body reads the real names (`parent`,
    `batch_size`, ...), and the downstream test class refers to this helper as
    `FlaxBigBirdModelTester`; restore from upstream before running.
    """
    def __init__( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=56 , lowercase_ : str=True , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=99 , lowercase_ : Optional[int]=32 , lowercase_ : Tuple=2 , lowercase_ : int=2 , lowercase_ : List[str]=7 , lowercase_ : Any="gelu_new" , lowercase_ : List[str]=0.1 , lowercase_ : str=0.1 , lowercase_ : List[Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Union[str, Any]=4 , lowercase_ : Union[str, Any]="block_sparse" , lowercase_ : Tuple=True , lowercase_ : Dict=False , lowercase_ : Dict=2 , lowercase_ : Dict=3 , ) -> List[str]:
        """Store every model/test hyper-parameter on the tester instance."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_attention_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = num_choices
        _UpperCamelCase = rescale_embeddings
        _UpperCamelCase = attention_type
        _UpperCamelCase = use_bias
        _UpperCamelCase = block_size
        _UpperCamelCase = num_random_blocks
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Build random input ids/masks and a matching BigBirdConfig."""
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _UpperCamelCase = None
        if self.use_attention_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        _UpperCamelCase = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
        """Return (config, inputs_dict) for the common Flax test mixin."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
        _UpperCamelCase = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Common-model tests for every Flax BigBird head, plus slow overrides.

    NOTE(review): left byte-identical — the base name `lowerCAmelCase` is
    undefined (presumably `FlaxModelTesterMixin`), `FlaxBigBirdModelTester`
    below has no matching definition, and every method shares the name
    `__UpperCAmelCase`; restore from upstream before running.
    """
    __A = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    __A = False
    __A = False
    def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
        """Create the shared model tester fixture."""
        _UpperCamelCase = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
        """Slow override of the save/load round-trip test."""
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCAmelCase ( self : Any) -> int:
        """Slow override of the no-automatic-init loading test."""
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
        """Slow override of the lazy-init test."""
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCAmelCase ( self : str) -> Union[str, Any]:
        """Slow override of the hidden-states output test."""
        super().test_hidden_states_output()
    @slow
    def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
        """Smoke-test loading every head class from the pretrained hub checkpoint."""
        for model_class_name in self.all_model_classes:
            _UpperCamelCase = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(lowercase_)
    def __UpperCAmelCase ( self : Tuple) -> str:
        """Run attention-output checks only when the flag enables them."""
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCAmelCase ( self : Tuple) -> Dict:
        """Check that jitted and non-jitted forward passes produce identical shapes."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                _UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_)
                _UpperCamelCase = model_class(lowercase_)
                @jax.jit
                def model_jitted(lowercase_ : Dict , lowercase_ : List[Any]=None , **lowercase_ : Tuple):
                    return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_)
                with self.subTest("JIT Enabled"):
                    _UpperCamelCase = model_jitted(**lowercase_).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        _UpperCamelCase = model_jitted(**lowercase_).to_tuple()
                    self.assertEqual(len(lowercase_) , len(lowercase_))
                    for jitted_output, output in zip(lowercase_ , lowercase_):
                        self.assertEqual(jitted_output.shape , output.shape)
    def __UpperCAmelCase ( self : Any , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : str=1e-5 , lowercase_ : int="outputs" , lowercase_ : List[str]=None) -> Tuple:
        """Skip PT/Flax equivalence checks on attention outputs; defer the rest to the mixin."""
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
| 547
| 1
|
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
    """Placeholder that raises a helpful error when the `note_seq` backend is missing.

    NOTE(review): the original used the undefined name `lowerCamelCase_` as the
    metaclass (`DummyObject` is what the file imports) and stored the backend
    list under `a_`, but `DummyObject` resolves attributes through
    ``_backends``; both names are restored. The two duplicate classmethod
    names are restored to the standard dummy-object pair.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        # Any instantiation attempt surfaces the missing-backend error.
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 713
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the gradients of two models are (not) in sync.

    NOTE(review): the original declared all four parameters under one
    duplicate name (a SyntaxError) while the body already read
    `model_a`/`model_b`/`did_step`/`iteration`, and the call sites in this
    file use ``check_model_parameters``; names restored accordingly.

    Args:
        model_a / model_b: models with matching parameter order and `.grad` set.
        did_step: when True gradients must match; when False they must differ.
        iteration: loop index, used only in the assertion message.
    """
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        # Frozen parameters carry no gradient to compare.
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    """Run one MSE training step, backward either manually or via the accelerator.

    NOTE(review): parameter names restored from the body's own references and
    the call sites (which use ``step_model``); the original declared all
    parameters under one duplicate name (a SyntaxError). The `True` default is
    kept from the original: 4-argument call sites below take the
    `accelerator.backward` path, and the explicit 5th argument selects the
    manual "ground truth" path.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device ) )
    if not do_backward:
        # Manual reference path: scale the loss as gradient accumulation would,
        # then backprop without the accelerator.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Build a seeded model, its deepcopy "DDP" twin, and a dataloader.

    When `sched` is True, also build an AdamW optimizer and LambdaLR scheduler
    for each model and prepare the DDP side through the accelerator.

    NOTE(review): local names restored — the original bound everything to one
    mangled name and its LR lambdas read an undefined `epoch`; the call sites
    in this file use ``get_training_setup``.

    Returns:
        (model, ddp_model, dataloader) or, with `sched`,
        (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched).
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        scheduler = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single device `no_sync` is a no-op: gradients must always match.

    NOTE(review): local names restored from the helper signatures this file
    defines; `main` calls this as ``test_noop_sync``.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync(accelerator):
    """Under real DDP, grads diverge inside `no_sync` and match after a sync step.

    NOTE(review): local names restored; `main` calls this as
    ``test_distributed_sync``. Even iterations accumulate locally, odd
    iterations synchronize.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` must sync grads only on step boundaries.

    NOTE(review): parameter and local names restored; `main` calls this as
    ``test_gradient_accumulation`` with two booleans.
    """
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Gradient accumulation with optimizer + scheduler: learning rates must align.

    NOTE(review): parameter and local names restored; mirrors the
    7-tuple returned by `get_training_setup(accelerator, True)`.
    """
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                # Without split batches each process advances the schedule once.
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
    GradientState._reset_state()
def snake_case_ ( ):
    # NOTE(review): obfuscation renamed all locals to `_lowerCAmelCase`, so the
    # reads `accelerator` and the two dataloaders are unresolved. Kept byte-for-byte.
    """Verify `GradientState` dataloader tracking: the active dataloader must
    switch when a nested dataloader is iterated (including an early break) and
    `end_of_dataloader` must only be set on each loader's final batch."""
    _lowerCAmelCase =Accelerator()
    _lowerCAmelCase =RegressionDataset(length=80 )
    _lowerCAmelCase =DataLoader(lowercase__ , batch_size=16 )
    _lowerCAmelCase =RegressionDataset(length=96 )
    _lowerCAmelCase =DataLoader(lowercase__ , batch_size=16 )
    _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(lowercase__ , lowercase__ )
    # No loader should be active before iteration starts.
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(lowercase__ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
        if iteration < len(lowercase__ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Iterate the second loader nested inside the first.
                for batch_num, _ in enumerate(lowercase__ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
                    if batch_num < len(lowercase__ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
    # NOTE(review): locals were renamed by obfuscation, so `state`,
    # `accelerator`, `split_batch` reads below are unresolved. Kept byte-for-byte.
    """Test driver: dispatch the gradient-sync / accumulation checks that are
    appropriate for the current distributed configuration, printing a banner
    from the local main process before each one."""
    _lowerCAmelCase =Accelerator()
    _lowerCAmelCase =accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(lowercase__ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(lowercase__ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        # Exercise every split/dispatch combination on multi-GPU.
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(lowercase__ , lowercase__ )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                # The default (False, False) combination was already covered above.
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(lowercase__ , lowercase__ )
def snake_case_ ( lowercase__ : Tuple ):
    """Process-launch entry point: ignore the supplied argument and run `main`."""
    return main()
if __name__ == "__main__":
    # Standard script guard: run the full test-suite driver when executed directly.
    main()
| 149
| 0
|
# Algorithm for the pigeonhole sorting
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = min(lowercase ) # min() finds the minimum value
lowerCamelCase_ = max(lowercase ) # max() finds the maximum value
lowerCamelCase_ = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowerCamelCase_ = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowercase , lowercase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowerCamelCase_ = 0
for count in range(lowercase ):
while holes[count] > 0:
holes[count] -= 1
lowerCamelCase_ = count + min_val
i += 1
def _SCREAMING_SNAKE_CASE ( ):
    """Demo driver: sort a sample list with pigeonhole sort and print it."""
    data = [8, 3, 2, 7, 4, 6, 8]
    # NOTE(review): `pigeonhole_sort` refers to the sort routine defined just
    # above, whose name was mangled by obfuscation -- confirm the binding.
    pigeonhole_sort(data )
    # BUG FIX: str.join requires strings, but the list holds ints.
    print('Sorted order is:' , ' '.join(str(n ) for n in data ) )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module as obfuscated; the
    # demo driver above was presumably named `main` originally -- confirm.
    main()
| 70
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_snake_case = "scheduler_config.json"
class UpperCAmelCase_ ( a):
    """Enum-style namespace of scheduler identifiers (values 1..14).

    NOTE(review): obfuscation collapsed fourteen distinct member names onto
    `lowerCamelCase__`, so at runtime only the final assignment (14) survives
    as a class attribute.
    """
    lowerCamelCase__ = 1
    lowerCamelCase__ = 2
    lowerCamelCase__ = 3
    lowerCamelCase__ = 4
    lowerCamelCase__ = 5
    lowerCamelCase__ = 6
    lowerCamelCase__ = 7
    lowerCamelCase__ = 8
    lowerCamelCase__ = 9
    lowerCamelCase__ = 10
    lowerCamelCase__ = 11
    lowerCamelCase__ = 12
    lowerCamelCase__ = 13
    lowerCamelCase__ = 14
@dataclass
class UpperCAmelCase_ ( a):
    """Scheduler output container (a `BaseOutput` subclass).

    NOTE(review): `42` is the corpus' placeholder for a stripped type
    annotation; presumably a tensor-typed `prev_sample` field originally --
    confirm against upstream.
    """
    lowerCamelCase__ = 42
class UpperCAmelCase_ :
    """Scheduler base mixin: config save/load and compatible-class discovery.

    NOTE(review): all four methods share the obfuscated name `snake_case__`,
    so only the last definition survives per decorator kind; the three class
    attributes likewise collapse onto one name.
    """
    # Config filename plus compatibility metadata used by load/save below.
    lowerCamelCase__ = SCHEDULER_CONFIG_NAME
    lowerCamelCase__ = []
    lowerCamelCase__ = True

    @classmethod
    def snake_case__ ( cls, __a = None, __a = None, __a=False, **__a, ):
        # NOTE(review): duplicate parameter name `__a` is a SyntaxError --
        # originally (pretrained_model_name_or_path, subfolder,
        # return_unused_kwargs, **kwargs). The annotated multi-target
        # assignment below is also invalid syntax as obfuscated.
        '''Load a scheduler config from a pretrained location and build an instance.'''
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = cls.load_config(
            pretrained_model_name_or_path=__a, subfolder=__a, return_unused_kwargs=__a, return_commit_hash=__a, **__a, )
        return cls.from_config(__a, return_unused_kwargs=__a, **__a)

    def snake_case__ ( self, __a, __a = False, **__a):
        # NOTE(review): duplicate `__a` parameters again (save_directory,
        # push_to_hub, **kwargs originally).
        '''Persist the scheduler configuration via the config mixin.'''
        self.save_config(save_directory=__a, push_to_hub=__a, **__a)

    @property
    def snake_case__ ( self):
        '''Return the list of scheduler classes compatible with this one.'''
        return self._get_compatibles()

    @classmethod
    def snake_case__ ( cls):
        '''Resolve `_compatibles` entries (plus this class) to actual classes
        exported by the package's top-level module.'''
        _lowerCAmelCase : Tuple = list(set([cls.__name__] + cls._compatibles))
        _lowerCAmelCase : Optional[int] = importlib.import_module(__name__.split(".")[0])
        _lowerCAmelCase : Union[str, Any] = [
            getattr(__a, __a) for c in compatible_classes_str if hasattr(__a, __a)
        ]
        return compatible_classes
| 500
| 0
|
"""simple docstring"""
import numpy as np
class a :
    """A* search node: a grid position plus path-cost bookkeeping.

    Attributes (as read by `astar` and the grid world below):
    position -- (x, y) cell coordinates.
    parent   -- predecessor node on the best known path, or None.
    g, h, f  -- path cost so far, heuristic estimate, and their sum.
    """

    def __init__( self ):
        # BUG FIX: the obfuscated version bound these values to throwaway
        # locals; `__eq__` and the search code read them from the instance,
        # so they must be instance attributes.
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self , UpperCamelCase_ ):
        # BUG FIX: compare against the *other* cell's position; `cell` was an
        # unresolved name left behind by obfuscation.
        return self.position == UpperCamelCase_.position

    def __snake_case ( self ):
        # Debug helper: print this node's coordinates.
        print(self.position )
class a :
    """Grid world for the A* demo: a 2-D array plus bounds checking."""

    def __init__( self , UpperCamelCase_=(5, 5) ):
        # BUG FIX: store the world state on the instance; the obfuscated
        # version assigned to locals, leaving the attributes read by the
        # neighbour routine (and by the demo script) unset.
        self.w = np.zeros(UpperCamelCase_ )
        self.world_x_limit = UpperCamelCase_[0]
        self.world_y_limit = UpperCamelCase_[1]

    def __snake_case ( self ):
        # NOTE(review): this method shares its (mangled) name with the one
        # below, so the neighbour routine shadows it; names kept to preserve
        # the obfuscated interface. Originally `show`.
        print(self.w )

    def __snake_case ( self , UpperCamelCase_ ):
        """Return the in-bounds 8-neighbourhood of cell `UpperCamelCase_` as
        fresh cell objects whose parent is the given cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = UpperCamelCase_.position[0]
        current_y = UpperCamelCase_.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # BUG FIX: build and append a *new* neighbour cell; the
                # obfuscated version appended the input cell itself. The
                # neighbour is created from the argument's class because the
                # cell class's original name (`Cell`) no longer resolves in
                # this obfuscated module.
                c = type(UpperCamelCase_ )()
                c.position = (x, y)
                c.parent = UpperCamelCase_
                neighbours.append(c )
        return neighbours
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ):
    # A* search driver -- originally `astar(world, start, goal)`.
    # NOTE(review): obfuscation collapsed all three parameters onto one name
    # (a SyntaxError) and collapsed locals, so `min_f`, `current`, `goal`,
    # `world`, `path` are unresolved; the heuristic `(ya - ya)**2 + (xa - xa)**2`
    # is identically zero, and the writes meant for `n.g`/`n.h`/`n.f` land in
    # locals. Kept byte-for-byte pending reconstruction from upstream.
    _lowerCAmelCase = None  # placeholder comment anchor (no code change below)
    UpperCAmelCase__ : Any = []
    UpperCAmelCase__ : Optional[Any] = []
    _open.append(_snake_case )
    while _open:
        # Pop the open node with the smallest f-score.
        UpperCAmelCase__ : Any = np.argmin([n.f for n in _open] )
        UpperCAmelCase__ : List[str] = _open[min_f]
        _closed.append(_open.pop(_snake_case ) )
        if current == goal:
            break
        for n in world.get_neigbours(_snake_case ):
            # NOTE(review): this inner loop's `continue` only skips within the
            # closed-list scan and never filters `n` -- same as upstream.
            for c in _closed:
                if c == n:
                    continue
            UpperCAmelCase__ : List[str] = current.g + 1
            UpperCAmelCase__ , UpperCAmelCase__ : List[str] = n.position
            UpperCAmelCase__ , UpperCAmelCase__ : int = goal.position
            UpperCAmelCase__ : List[Any] = (ya - ya) ** 2 + (xa - xa) ** 2
            UpperCAmelCase__ : Union[str, Any] = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(_snake_case )
    # Walk parents back from the goal to rebuild the path, then reverse it.
    UpperCAmelCase__ : Optional[Any] = []
    while current.parent is not None:
        path.append(current.position )
        UpperCAmelCase__ : Tuple = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # Demo: run A* from (0, 0) to (4, 4) on a 5x5 grid and mark the path.
    # NOTE(review): `Gridworld`, `Cell`, `astar`, `world`, `start`, `goal`
    # and `s` are unresolved after obfuscation renamed every binding.
    UpperCamelCase__ = Gridworld()
    # Start position and goal
    UpperCamelCase__ = Cell()
    UpperCamelCase__ = (0, 0)
    UpperCamelCase__ = Cell()
    UpperCamelCase__ = (4, 4)
    print(f'path from {start.position} to {goal.position}')
    UpperCamelCase__ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        UpperCamelCase__ = 1
    print(world.w)
| 254
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase__ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def lowerCamelCase ( _snake_case , main_target ):
    """Fitness function: score `_snake_case` against `main_target`, awarding
    one point for each position whose gene already matches the target.

    Returns the (item, float(score)) pair expected by the selection code.
    """
    # BUG FIX: obfuscation collapsed both parameters onto one name (a
    # SyntaxError) and dropped the score local, leaving `main_target` and
    # `item` unresolved.
    score = len([g for position, g in enumerate(_snake_case ) if g == main_target[position]] )
    return (_snake_case, float(score ))
def lowerCamelCase ( _snake_case , other_parent ):
    """Single-point crossover: cut both parents at one random index and swap
    their tails, producing two children."""
    # BUG FIX: obfuscation collapsed both parameters onto one name (a
    # SyntaxError), which also made both children copies of the same parent.
    random_slice = random.randint(0 , len(_snake_case ) - 1 )
    child_a = _snake_case[:random_slice] + other_parent[random_slice:]
    child_b = other_parent[:random_slice] + _snake_case[random_slice:]
    return (child_a, child_b)
def lowerCamelCase ( _snake_case , genes ):
    """With probability MUTATION_PROBABILITY, replace one random gene of the
    child string `_snake_case` with a random gene from `genes`; return the
    (possibly mutated) string."""
    child_list = list(_snake_case )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        # BUG FIX: the mutated gene must be written back into the child; the
        # obfuscated version dropped the index assignment entirely.
        child_list[random.randint(0 , len(_snake_case ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def lowerCamelCase ( _snake_case , population_score , genes ):
    """Selection step: breed up to 10 mutated children from the scored parent
    `_snake_case` (a (string, normalized_score) pair), choosing mates from
    `population_score` and returning the new child strings."""
    # BUG FIX: obfuscation collapsed all three parameters onto one name (a
    # SyntaxError); names restored from the body's reads.
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(_snake_case[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        # NOTE(review): the obfuscated randint bound was unresolved; upstream
        # samples mates among the N_SELECTED best -- confirm against callers.
        other_parent = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(_snake_case[0] , other_parent )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case = True ):
    # Main evolutionary loop -- originally `basic(target, genes, debug=True)`.
    # NOTE(review): obfuscation collapsed all three parameters onto one name
    # (a SyntaxError) and collapsed many locals, so reads such as `target`,
    # `genes`, `not_in_genes_list`, `population`, `population_score`, `x`,
    # `generation`, `total_population`, `debug` are unresolved; the annotated
    # tuple assignment `a , b : T = 0, 0` is also invalid syntax. Kept
    # byte-for-byte pending reconstruction from upstream.
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        UpperCAmelCase__ : int = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(_snake_case )
    # Verify that the target contains no genes besides the ones inside genes variable.
    UpperCAmelCase__ : Tuple = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        UpperCAmelCase__ : str = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(_snake_case )
    # Generate random starting population.
    UpperCAmelCase__ : Any = []
    for _ in range(_snake_case ):
        population.append(''.join([random.choice(_snake_case ) for i in range(len(_snake_case ) )] ) )
    # Just some logs to know what the algorithms is doing.
    UpperCAmelCase__ , UpperCAmelCase__ : List[str] = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(_snake_case )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #    max_workers=NUM_WORKERS) as executor:
        #    futures = {executor.submit(evaluate, item) for item in population}
        #    concurrent.futures.wait(futures)
        #    population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        UpperCAmelCase__ : Union[str, Any] = [evaluate(_snake_case ,_snake_case ) for item in population]
        # Check if there is a matching evolution.
        UpperCAmelCase__ : List[str] = sorted(_snake_case ,key=lambda _snake_case : x[1] ,reverse=_snake_case )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'''\nGeneration: {generation}'''
                F'''\nTotal Population:{total_population}'''
                F'''\nBest score: {population_score[0][1]}'''
                F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        UpperCAmelCase__ : int = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(_snake_case )
        # Normalize population score to be between 0 and 1.
        UpperCAmelCase__ : List[Any] = [
            (item, score / len(_snake_case )) for item, score in population_score
        ]
        # This is selection
        for i in range(_snake_case ):
            population.extend(select(population_score[int(_snake_case )] ,_snake_case ,_snake_case ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(_snake_case ) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo: evolve a random population toward the target sentence.
    # NOTE(review): `target_str`, `genes_list`, `generation`, `population` and
    # `target` are unresolved -- obfuscation renamed every binding above to
    # `UpperCamelCase__`.
    UpperCamelCase__ = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    UpperCamelCase__ = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 254
| 1
|
from manim import *
class _snake_case ( UpperCAmelCase_ ):
    """Manim scene animating a model checkpoint loading into CPU/GPU memory
    blocks (documentation animation).

    NOTE(review): nearly every local binding in the method was renamed to
    `lowercase__` and call arguments to `SCREAMING_SNAKE_CASE_`, so reads such
    as `mem`, `cpu`, `gpu`, `model`, `key`, `key_text`, `step_a`,
    `cpu_left_col_base`, `cpu_target`, `cpu_targs`, `first_animations`,
    `second_animations` are unresolved names. Code kept byte-for-byte.
    """

    def lowercase__ ( self):
        '''Build the CPU/GPU/model rectangle groups, then play the key legend
        and the per-block move-to-CPU animation.'''
        # Base 0.5x0.5 rectangle and its slightly smaller stroke-less copy.
        lowercase__ : str = Rectangle(height=0.5 , width=0.5)
        lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
        lowercase__ : List[Any] = [mem.copy() for i in range(6)]
        lowercase__ : Tuple = [mem.copy() for i in range(6)]
        lowercase__ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
        lowercase__ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
        lowercase__ : List[Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
        lowercase__ : Dict = Text("""CPU""" , font_size=24)
        lowercase__ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(SCREAMING_SNAKE_CASE_)
        # Single-block GPU group, aligned next to the CPU.
        lowercase__ : str = [mem.copy() for i in range(1)]
        lowercase__ : Any = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
        lowercase__ : Optional[int] = Text("""GPU""" , font_size=24)
        lowercase__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
        gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
        gpu.set_x(gpu.get_x() - 1)
        self.add(SCREAMING_SNAKE_CASE_)
        # Six-block "Model" group on the right.
        lowercase__ : Optional[int] = [mem.copy() for i in range(6)]
        lowercase__ : str = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
        lowercase__ : Optional[Any] = Text("""Model""" , font_size=24)
        lowercase__ : Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , )
        # Caption and legend key for the "empty model" state.
        lowercase__ : Optional[Any] = MarkupText(
            f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
        lowercase__ : Any = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        lowercase__ : Optional[int] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5) , Write(SCREAMING_SNAKE_CASE_) , Write(SCREAMING_SNAKE_CASE_))
        self.add(SCREAMING_SNAKE_CASE_)
        lowercase__ : List[Any] = []
        lowercase__ : Union[str, Any] = []
        lowercase__ : str = []
        # Animate each model block moving into its CPU slot.
        for i, rect in enumerate(SCREAMING_SNAKE_CASE_):
            lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7)
            cpu_target.move_to(SCREAMING_SNAKE_CASE_)
            cpu_target.generate_target()
            lowercase__ : Union[str, Any] = 0.4_6 / 4
            lowercase__ : Tuple = 0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
            cpu_targs.append(SCREAMING_SNAKE_CASE_)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE_))
            second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5))
        self.play(*SCREAMING_SNAKE_CASE_)
        self.play(*SCREAMING_SNAKE_CASE_)
        self.wait()
| 12
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
    """Image processor that rescales pixel values and symmetrically pads each
    image so both spatial sides become a multiple of `pad_size`."""

    __lowerCAmelCase : Any = ['pixel_values']

    def __init__( self , do_rescale = True , rescale_factor = 1 / 2_55 , do_pad = True , pad_size = 8 , **kwargs ):
        # BUG FIX: obfuscation collapsed all parameters onto one name (a
        # SyntaxError); names restored from the attributes assigned below.
        super().__init__(**kwargs)
        self.do_rescale = do_rescale          # whether to scale pixel values by rescale_factor
        self.rescale_factor = rescale_factor  # typically 1/255 for uint8 images
        self.do_pad = do_pad                  # whether to pad to a multiple of pad_size
        self.pad_size = pad_size              # side-length multiple to pad up to

    def rescale( self , image , scale , data_format = None , **kwargs ):
        # BUG FIX: renamed from the obfuscated `lowercase__` so that
        # `self.rescale(...)` in the preprocess method resolves; delegates to
        # the module-level `rescale` transform.
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def pad( self , image , size , data_format = None ):
        # BUG FIX: renamed so `self.pad(...)` in the preprocess method
        # resolves. Pads bottom/right symmetrically up to the next multiple
        # of `size` (the module-level `pad` transform is found via globals).
        old_height , old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format)

    def lowercase__ ( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs ):
        """Validate, rescale, pad and batch the input images into a BatchFeature."""
        # Per-call overrides fall back to the instance defaults.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 12
| 1
|
import math
def snake_case_ ( lowercase__ : int ):
    """Return True when `lowercase__` admits a "perfect" partition, i.e. when
    sqrt(4*n + 1)/2 + 1/2 is an exact power of two."""
    # BUG FIX: `math.loga` and `a_` were obfuscation artifacts; the original
    # takes log base 2 of the candidate and checks that it is integral.
    exponent = math.log2(math.sqrt(4 * lowercase__ + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def snake_case_ ( lowercase__ : float = 1 / 1_23_45 ):
    """Search partition candidates (integer values of (k**2 - 1)/4 for
    k = 3, 4, ...) and return the first candidate at which the running ratio
    of perfect partitions drops below `lowercase__` (default 1/12345)."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        # (BUG FIX: the obfuscated `int(a_)` referenced an unresolved name).
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            # NOTE(review): `check_partition_perfect` is the predicate defined
            # just above, whose name was obfuscated in this copy -- confirm
            # the binding.
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < lowercase__:
                return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
    # Print the computed answer when run as a script. NOTE(review): `solution`
    # is unresolved here -- obfuscation renamed it to `snake_case_` above.
    print(F'{solution() = }')
| 718
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( lowerCamelCase_ , unittest.TestCase ):
    """Fast pipeline tests for the Kandinsky 2.2 image-to-image pipeline, run
    on CPU with tiny randomly initialised components.

    NOTE(review): obfuscation gave all properties/methods the same name
    (`lowerCAmelCase__`) and all class attributes the name `a_`, so only the
    last definition of each survives at runtime; several locals below
    (`model`, `unet`, `movq`, `components`, `seed`, `pipe`, `output`, etc.)
    are likewise unresolved. Code kept byte-for-byte.
    """
    a_: Any = KandinskyVaaImgaImgPipeline
    a_: Optional[int] = ["""image_embeds""", """negative_image_embeds""", """image"""]
    a_: int = [
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    a_: Dict = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    a_: Dict = False

    @property
    def lowerCAmelCase__ ( self : Any ):
        # Text-embedder hidden size used by the dummy components.
        return 32

    @property
    def lowerCAmelCase__ ( self : str ):
        # Time-embedding input dimension.
        return 32

    @property
    def lowerCAmelCase__ ( self : Dict ):
        return self.time_input_dim

    @property
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase__ ( self : Any ):
        return 100

    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        # Tiny deterministic UNet for fast CPU tests.
        torch.manual_seed(0 )
        _lowerCAmelCase ={
            """in_channels""": 4,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        _lowerCAmelCase =UNetaDConditionModel(**lowerCamelCase_ )
        return model

    @property
    def lowerCAmelCase__ ( self : Dict ):
        # Keyword arguments for the tiny VQ decoder ("movq") below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def lowerCAmelCase__ ( self : str ):
        torch.manual_seed(0 )
        _lowerCAmelCase =VQModel(**self.dummy_movq_kwargs )
        return model

    def lowerCAmelCase__ ( self : Optional[int] ):
        # Assemble the full set of dummy pipeline components.
        _lowerCAmelCase =self.dummy_unet
        _lowerCAmelCase =self.dummy_movq
        _lowerCAmelCase ={
            """num_train_timesteps""": 1000,
            """beta_schedule""": """linear""",
            """beta_start""": 0.0_0085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        _lowerCAmelCase =DDIMScheduler(**lowerCamelCase_ )
        _lowerCAmelCase ={
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def lowerCAmelCase__ ( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple=0 ):
        # NOTE(review): duplicate parameter name `lowerCamelCase_` is a
        # SyntaxError -- originally (device, seed=0).
        _lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
        _lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            lowerCamelCase_ )
        # create init_image
        _lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
        _lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
        if str(lowerCamelCase_ ).startswith("""mps""" ):
            _lowerCAmelCase =torch.manual_seed(lowerCamelCase_ )
        else:
            _lowerCAmelCase =torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        _lowerCAmelCase ={
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        # End-to-end run on CPU; compares a corner slice against golden values
        # and checks dict vs tuple return paths agree.
        _lowerCAmelCase ="""cpu"""
        _lowerCAmelCase =self.get_dummy_components()
        _lowerCAmelCase =self.pipeline_class(**lowerCamelCase_ )
        _lowerCAmelCase =pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _lowerCAmelCase =pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
        _lowerCAmelCase =output.images
        _lowerCAmelCase =pipe(
            **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
        _lowerCAmelCase =image[0, -3:, -3:, -1]
        _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase =np.array(
            [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration test: runs the real Kandinsky 2.2 prior + decoder
    pipelines and compares the output against a reference image.

    NOTE(review): both methods share the obfuscated name `lowerCAmelCase__`
    (the teardown is shadowed by the test), and locals such as
    `expected_image`, `init_image`, `prompt`, `pipe_prior`, `pipeline`,
    `generator`, `image_emb`, `zero_image_emb`, `output`, `image` are
    unresolved. Code kept byte-for-byte.
    """
    def lowerCAmelCase__ ( self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self : int ):
        _lowerCAmelCase =load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
        _lowerCAmelCase =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        _lowerCAmelCase ="""A red cartoon frog, 4k"""
        _lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(lowerCamelCase_ )
        _lowerCAmelCase =KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
        _lowerCAmelCase =pipeline.to(lowerCamelCase_ )
        pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
        _lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
        _lowerCAmelCase , _lowerCAmelCase =pipe_prior(
            lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        _lowerCAmelCase =pipeline(
            image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
        _lowerCAmelCase =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 149
| 0
|
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Fetch the first Google results page for the CLI query, save the raw
    # HTML, and open the top five result links in a browser.
    # NOTE(review): every binding below was renamed to `lowerCAmelCase__`, so
    # the reads `url`, `res`, `soup`, `links` are unresolved names (and the
    # `bsa` import above is presumably a mangled `bs4`).
    print("Googling.....")
    lowerCAmelCase__: Optional[Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    lowerCAmelCase__: Union[str, Any] = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(1_0000):
            out_file.write(data)
    lowerCAmelCase__: List[Any] = BeautifulSoup(res.text, "html.parser")
    lowerCAmelCase__: int = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'''https://google.com{link.get("href")}''')
| 345
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase (lowercase_ ):
    '''Image processor that rescales pixel values and symmetrically pads each
    image so both spatial sides become a multiple of `pad_size`.'''

    lowerCAmelCase__ : Optional[int] = ["""pixel_values"""]

    def __init__(self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        # BUG FIX: obfuscation collapsed all parameters onto one name (a
        # SyntaxError); names restored from the attributes assigned below.
        super().__init__(**kwargs )
        self.do_rescale = do_rescale          # whether to scale pixel values by rescale_factor
        self.rescale_factor = rescale_factor  # typically 1/255 for uint8 images
        self.do_pad = do_pad                  # whether to pad to a multiple of pad_size
        self.pad_size = pad_size              # side-length multiple to pad up to

    def rescale (self , image , scale , data_format = None , **kwargs ):
        # BUG FIX: renamed from the obfuscated `UpperCamelCase__` (which also
        # collided with the other two methods) so that `self.rescale(...)` in
        # the preprocess method resolves; delegates to the module-level
        # `rescale` transform.
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad (self , image , size , data_format = None ):
        # BUG FIX: renamed so `self.pad(...)` in the preprocess method
        # resolves. Pads bottom/right symmetrically up to the next multiple
        # of `size` (the module-level `pad` transform is found via globals).
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=data_format )

    def UpperCamelCase__ (self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''Validate, rescale, pad and batch the input images into a BatchFeature.'''
        # Per-call overrides fall back to the instance defaults.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 460
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """Slow/fast tokenizer tests for LED, driven by the shared tokenizer test mixin."""

    # Classes under test and mixin configuration.
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a slow tokenizer from the temp dir (mixin hook)."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Build a fast tokenizer from the temp dir (mixin hook)."""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Mixin hook: sample input/output text pair."""
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('allenai/led-base-16384')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        # Without a text_target, no labels/decoder inputs should be produced.
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=3_2, padding='max_length', return_tensors='pt')
            self.assertEqual(3_2, targets['input_ids'].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1_0_2_4, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_1_2_2))

    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt')
            targets = tokenizer(text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            # Both encoder input and labels are wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            # Padded positions of the shorter sequence get -1.
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Pretokenized inputs are not supported for LED; skip the mixin's test.
        pass

    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on special-token handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 118
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Return ``base`` raised to a non-negative ``exponent`` via recursion.

    ``power(b, 0)`` is 1; negative exponents are handled by the caller below.
    """
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
| 118
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Public API of the onnx package, loaded lazily via _LazyModule.
_import_structure = {
    """config""": [
        """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
        """OnnxConfig""",
        """OnnxConfigWithPast""",
        """OnnxSeq2SeqConfigWithPast""",
        """PatchingSpec""",
    ],
    """convert""": ["""export""", """validate_model_outputs"""],
    """features""": ["""FeaturesManager"""],
    """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are deferred.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 474
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the pretrained-config URL map (previously both were bound to
# the same name, so the dict silently shadowed the logger).
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
    """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
    """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
    """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
    """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
    """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
    """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class _UpperCamelCase ( PretrainedConfig ):
    """Configuration for an X-MOD model.

    Stores standard transformer hyperparameters plus the X-MOD specific
    adapter/language options. Defaults mirror the obfuscated original.
    """

    model_type = """xmod"""

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD specific adapter and language settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig ( OnnxConfig ):
    """ONNX export configuration for X-MOD.

    Renamed from the duplicate class name that shadowed the model config above;
    the `inputs` property is the `OnnxConfig` contract.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 474
| 1
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Iteratively search `item` in an ascending `sorted_collection`.

    Returns the index of `item`, or None when absent (or when the interpolated
    probe falls outside the collection).
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divide-by-zero during interpolation when the range is constant
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over `sorted_collection[left:right + 1]`.

    Returns the index of `item`, or None when absent.
    """
    # avoid divide-by-zero when the sub-range is constant
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Return True if `collection` is ascending sorted; raise ValueError otherwise."""
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True
if __name__ == "__main__":
import sys
__snake_case = 0
if debug == 1:
__snake_case = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
__snake_case = 67
__snake_case = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print('''Not found''')
| 700
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
# Module-level logger (HF-style wrapper around stdlib logging) for this image-processor module.
__snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute a (height, width) target for `input_image`.

    Scales toward `output_size`, optionally preserving aspect ratio by taking
    the scale closest to 1, and rounds each dimension to a multiple of `multiple`.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple; floor if that overshoots max_val,
        # ceil if it undershoots min_val.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class lowercase ( BaseImageProcessor ):
    """DPT image processor: optional resize (aspect-ratio / multiple-of aware),
    rescale, and normalize, plus semantic-segmentation post-processing.
    """

    # Name of the model input produced by `preprocess`.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size`, honoring `keep_aspect_ratio`/`ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size['''height'''], size['''width''']),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/rescale/normalize pipeline to `images`.

        Raises:
            ValueError: on invalid image types or missing required parameters.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        # Parenthesized on purpose: only require size/resample when resizing.
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            # Pass the resolved aspect/multiple options (previously dropped).
            images = [
                self.resize(
                    image=image,
                    size=size,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                    resample=resample,
                )
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Turn model `outputs.logits` into per-image semantic segmentation maps.

        If `target_sizes` is given, each logits map is bilinearly upsampled to
        its target size before the per-pixel argmax; otherwise the argmax is
        taken at logits resolution.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 280
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.